changeset 1346:09ac706c2623

Merge
author asaha
date Wed, 24 Mar 2010 17:16:33 -0700
parents f5dd08ad65df bf823ef06b4f
children 895d9ade6111 5b29c2368d93
files src/share/vm/ci/ciEnv.cpp src/share/vm/classfile/loaderConstraints.cpp src/share/vm/classfile/loaderConstraints.hpp src/share/vm/classfile/systemDictionary.cpp src/share/vm/gc_implementation/g1/ptrQueue.inline.hpp src/share/vm/opto/type.cpp
diffstat 432 files changed, 16265 insertions(+), 4287 deletions(-)
--- a/.hgignore	Mon Mar 15 15:51:36 2010 -0400
+++ b/.hgignore	Wed Mar 24 17:16:33 2010 -0700
@@ -1,6 +1,6 @@
 ^build/
 ^dist/
-^nbproject/private/
+/nbproject/private/
 ^src/share/tools/hsdis/build/
 ^src/share/tools/IdealGraphVisualizer/[a-zA-Z0-9]*/build/
 ^src/share/tools/IdealGraphVisualizer/build/
--- a/.hgtags	Mon Mar 15 15:51:36 2010 -0400
+++ b/.hgtags	Wed Mar 24 17:16:33 2010 -0700
@@ -51,3 +51,35 @@
 f4b900403d6e4b0af51447bd13bbe23fe3a1dac7 jdk7-b74
 d8dd291a362acb656026a9c0a9da48501505a1e7 jdk7-b75
 9174bb32e934965288121f75394874eeb1fcb649 jdk7-b76
+455105fc81d941482f8f8056afaa7aa0949c9300 jdk7-b77
+e703499b4b51e3af756ae77c3d5e8b3058a14e4e jdk7-b78
+a5a6adfca6ecefb5894a848debabfe442ff50e25 jdk7-b79
+3003ddd1d4330b06cb4691ae74d600d3685899eb jdk7-b80
+1f9b07674480c224828852ffe137beea36b3cab5 jdk7-b81
+1999f5b12482d66c8b0daf6709daea4f51893a04 jdk7-b82
+a94714c550658fd6741793ef036cb9625dc2ab1a hs17-b01
+faf94d94786b621f8e13cbcc941ca69c6d967c3f hs17-b02
+f4b900403d6e4b0af51447bd13bbe23fe3a1dac7 hs17-b03
+d8dd291a362acb656026a9c0a9da48501505a1e7 hs17-b04
+9174bb32e934965288121f75394874eeb1fcb649 hs17-b05
+a5a6adfca6ecefb5894a848debabfe442ff50e25 hs17-b06
+3003ddd1d4330b06cb4691ae74d600d3685899eb hs17-b07
+1f9b07674480c224828852ffe137beea36b3cab5 hs17-b08
+ff3232b68fbb35185b338d7ff4695b52460243f3 hs17-b09
+981375ca07b7f0605f92f57aad95122e8c385a4d hs16-b01
+f4cbf78110c726919f46b59a3b054c54c7e889b4 hs16-b02
+07c1c01e031513bfe6a7d17c6cf30d2752824ae9 hs16-b03
+08f86fa55a31113df626a75c8a626e66a543a1bd hs16-b04
+32c83fb84370a35344676991a48440378e6b6c8a hs16-b05
+ba313800759b678979434d6da8ed3bf49eb8bea4 hs16-b06
+3c0f729815607e1678bd0c41ae68494c700dcc71 hs16-b07
+ac59d4e6dae51ac5fc31a9a4940d1857f91161b1 hs16-b08
+3f844a28c5f4912bd04043b44f21b25b0805ffc2 hs15-b01
+1605bb4eb5a7a1703b13d5b077a22cc665fe45f7 hs15-b02
+2581d90c6c9b2012da930eb4742add94a03069a0 hs15-b03
+9ab385cb0c42997e16a7761ebcd25c90560a2714 hs15-b04
+fafab5d5349c7c066d677538db67a1ee0fb33bd2 hs15-b05
+3f370a32906eb5ba993fabd7b4279be7f31052b9 jdk7-b83
+ffc8d176b84bcfb5ac21302b4feb3b0c0d69b97c jdk7-b84
+6c9796468b91dcbb39e09dfa1baf9779ac45eb66 jdk7-b85
+418bc80ce13995149eadc9eecbba21d7a9fa02ae hs17-b10
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/SystemDictionary.java	Mon Mar 15 15:51:36 2010 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/SystemDictionary.java	Wed Mar 24 17:16:33 2010 -0700
@@ -63,12 +63,12 @@
     javaSystemLoaderField = type.getOopField("_java_system_loader");
     nofBuckets = db.lookupIntConstant("SystemDictionary::_nof_buckets").intValue();
 
-    objectKlassField = type.getOopField(WK_KLASS("object_klass"));
-    classLoaderKlassField = type.getOopField(WK_KLASS("classloader_klass"));
-    stringKlassField = type.getOopField(WK_KLASS("string_klass"));
-    systemKlassField = type.getOopField(WK_KLASS("system_klass"));
-    threadKlassField = type.getOopField(WK_KLASS("thread_klass"));
-    threadGroupKlassField = type.getOopField(WK_KLASS("threadGroup_klass"));
+    objectKlassField = type.getOopField(WK_KLASS("Object_klass"));
+    classLoaderKlassField = type.getOopField(WK_KLASS("ClassLoader_klass"));
+    stringKlassField = type.getOopField(WK_KLASS("String_klass"));
+    systemKlassField = type.getOopField(WK_KLASS("System_klass"));
+    threadKlassField = type.getOopField(WK_KLASS("Thread_klass"));
+    threadGroupKlassField = type.getOopField(WK_KLASS("ThreadGroup_klass"));
   }
 
   // This WK functions must follow the definitions in systemDictionary.hpp:
--- a/make/Makefile	Mon Mar 15 15:51:36 2010 -0400
+++ b/make/Makefile	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright 2005-2008 Sun Microsystems, Inc.  All Rights Reserved.
+# Copyright 2005-2010 Sun Microsystems, Inc.  All Rights Reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -312,10 +312,13 @@
 $(EXPORT_LIB_DIR)/%.jar: $(GEN_DIR)/%.jar
 	$(install-file)
 
-# Include files (jvmti.h, jni.h, $(JDK_INCLUDE_SUBDIR)/jni_md.h, jmm.h)
+# Include files (jvmti.h, jvmticmlr.h, jni.h, $(JDK_INCLUDE_SUBDIR)/jni_md.h, jmm.h)
 $(EXPORT_INCLUDE_DIR)/%: $(GEN_DIR)/jvmtifiles/%
 	$(install-file)
 
+$(EXPORT_INCLUDE_DIR)/%: $(HS_SRC_DIR)/share/vm/code/%
+	$(install-file)
+
 $(EXPORT_INCLUDE_DIR)/%: $(HS_SRC_DIR)/share/vm/prims/%
 	$(install-file)
 
--- a/make/defs.make	Mon Mar 15 15:51:36 2010 -0400
+++ b/make/defs.make	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright 2006-2008 Sun Microsystems, Inc.  All Rights Reserved.
+# Copyright 2006-2010 Sun Microsystems, Inc.  All Rights Reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -261,6 +261,7 @@
 
 # Common export list of files
 EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jvmti.h
+EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jvmticmlr.h
 EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jni.h
 EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/$(JDK_INCLUDE_SUBDIR)/jni_md.h
 EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jmm.h
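
Note: jvmticmlr.h is the JVMTI compiled-method-load record header (the inline-info records delivered with the CompiledMethodLoad event). The new install rule in make/Makefile above copies it out of src/share/vm/code, and this line adds it to the common export list so it ships next to jvmti.h.
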
--- a/make/hotspot_version	Mon Mar 15 15:51:36 2010 -0400
+++ b/make/hotspot_version	Wed Mar 24 17:16:33 2010 -0700
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=17
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=05
+HS_BUILD_NUMBER=10
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
--- a/make/linux/makefiles/debug.make	Mon Mar 15 15:51:36 2010 -0400
+++ b/make/linux/makefiles/debug.make	Wed Mar 24 17:16:33 2010 -0700
@@ -38,7 +38,7 @@
  "Please use 'make jvmg' to build debug JVM.                            \n" \
  "----------------------------------------------------------------------\n")
 
-G_SUFFIX =
+G_SUFFIX = _g
 VERSION = debug
 SYSDEFS += -DASSERT -DDEBUG
 PICFLAGS = DEFAULT
--- a/make/linux/makefiles/fastdebug.make	Mon Mar 15 15:51:36 2010 -0400
+++ b/make/linux/makefiles/fastdebug.make	Wed Mar 24 17:16:33 2010 -0700
@@ -58,7 +58,7 @@
 # Linker mapfile
 MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug
 
-G_SUFFIX =
+G_SUFFIX = _g
 VERSION = optimized
 SYSDEFS += -DASSERT -DFASTDEBUG
 PICFLAGS = DEFAULT
--- a/make/linux/makefiles/jsig.make	Mon Mar 15 15:51:36 2010 -0400
+++ b/make/linux/makefiles/jsig.make	Wed Mar 24 17:16:33 2010 -0700
@@ -25,9 +25,12 @@
 # Rules to build signal interposition library, used by vm.make
 
 # libjsig[_g].so: signal interposition library
-JSIG = jsig$(G_SUFFIX)
+JSIG = jsig
 LIBJSIG = lib$(JSIG).so
 
+JSIG_G    = $(JSIG)$(G_SUFFIX)
+LIBJSIG_G = lib$(JSIG_G).so
+
 JSIGSRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/vm
 
 DEST_JSIG  = $(JDK_LIBDIR)/$(LIBJSIG)
@@ -50,6 +53,7 @@
 	@echo Making signal interposition lib...
 	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
                          $(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) -o $@ $< -ldl
+	$(QUIETLY) [ -f $(LIBJSIG_G) ] || { ln -s $@ $(LIBJSIG_G); }
 
 install_jsig: $(LIBJSIG)
 	@echo "Copying $(LIBJSIG) to $(DEST_JSIG)"
--- a/make/linux/makefiles/jvmg.make	Mon Mar 15 15:51:36 2010 -0400
+++ b/make/linux/makefiles/jvmg.make	Wed Mar 24 17:16:33 2010 -0700
@@ -35,7 +35,7 @@
 # Linker mapfile
 MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug
 
-G_SUFFIX =
+G_SUFFIX = _g
 VERSION = debug
 SYSDEFS += -DASSERT -DDEBUG
 PICFLAGS = DEFAULT
--- a/make/linux/makefiles/launcher.make	Mon Mar 15 15:51:36 2010 -0400
+++ b/make/linux/makefiles/launcher.make	Wed Mar 24 17:16:33 2010 -0700
@@ -25,7 +25,9 @@
 # Rules to build gamma launcher, used by vm.make
 
 # gamma[_g]: launcher
-LAUNCHER = gamma$(G_SUFFIX)
+
+LAUNCHER   = gamma
+LAUNCHER_G = $(LAUNCHER)$(G_SUFFIX)
 
 LAUNCHERDIR   = $(GAMMADIR)/src/os/$(Platform_os_family)/launcher
 LAUNCHERFLAGS = $(ARCHFLAG) \
@@ -70,4 +72,5 @@
 	    $(LINK_LAUNCHER/PRE_HOOK) \
 	    $(LINK_LAUNCHER) $(LFLAGS_LAUNCHER) -o $@ $(LAUNCHER.o) $(LIBS_LAUNCHER); \
 	    $(LINK_LAUNCHER/POST_HOOK) \
+	    [ -f $(LAUNCHER_G) ] || { ln -s $@ $(LAUNCHER_G); }; \
         }
--- a/make/linux/makefiles/saproc.make	Mon Mar 15 15:51:36 2010 -0400
+++ b/make/linux/makefiles/saproc.make	Wed Mar 24 17:16:33 2010 -0700
@@ -25,9 +25,13 @@
 # Rules to build serviceability agent library, used by vm.make
 
 # libsaproc[_g].so: serviceability agent
-SAPROC = saproc$(G_SUFFIX)
+
+SAPROC = saproc
 LIBSAPROC = lib$(SAPROC).so
 
+SAPROC_G = $(SAPROC)$(G_SUFFIX)
+LIBSAPROC_G = lib$(SAPROC_G).so
+
 AGENT_DIR = $(GAMMADIR)/agent
 
 SASRCDIR = $(AGENT_DIR)/src/os/$(Platform_os_family)
@@ -75,6 +79,7 @@
 	           $(SA_DEBUG_CFLAGS)                                   \
 	           -o $@                                                \
 	           -lthread_db
+	$(QUIETLY) [ -f $(LIBSAPROC_G) ] || { ln -s $@ $(LIBSAPROC_G); }
 
 install_saproc: checkAndBuildSA
 	$(QUIETLY) if [ -e $(LIBSAPROC) ] ; then             \
--- a/make/linux/makefiles/vm.make	Mon Mar 15 15:51:36 2010 -0400
+++ b/make/linux/makefiles/vm.make	Wed Mar 24 17:16:33 2010 -0700
@@ -113,8 +113,9 @@
 #----------------------------------------------------------------------
 # JVM
 
-JVM    = jvm$(G_SUFFIX)
-LIBJVM = lib$(JVM).so
+JVM      = jvm
+LIBJVM   = lib$(JVM).so
+LIBJVM_G = lib$(JVM)$(G_SUFFIX).so
 
 JVM_OBJ_FILES = $(Obj_Files)
 
@@ -201,6 +202,7 @@
 		       $(LFLAGS_VM) -o $@ $(LIBJVM.o) $(LIBS_VM);       \
 	    $(LINK_LIB.CC/POST_HOOK)                                    \
 	    rm -f $@.1; ln -s $@ $@.1;                                  \
+	    [ -f $(LIBJVM_G) ] || { ln -s $@ $(LIBJVM_G); ln -s $@.1 $(LIBJVM_G).1; }; \
 	    if [ -x /usr/sbin/selinuxenabled ] ; then                   \
 	      /usr/sbin/selinuxenabled;                                 \
               if [ $$? = 0 ] ; then					\
--- a/make/solaris/makefiles/debug.make	Mon Mar 15 15:51:36 2010 -0400
+++ b/make/solaris/makefiles/debug.make	Wed Mar 24 17:16:33 2010 -0700
@@ -54,7 +54,7 @@
  "Please use 'gnumake jvmg' to build debug JVM.                            \n" \
  "-------------------------------------------------------------------------\n")
 
-G_SUFFIX =
+G_SUFFIX = _g
 VERSION = debug
 SYSDEFS += -DASSERT -DDEBUG
 PICFLAGS = DEFAULT
--- a/make/solaris/makefiles/dtrace.make	Mon Mar 15 15:51:36 2010 -0400
+++ b/make/solaris/makefiles/dtrace.make	Wed Mar 24 17:16:33 2010 -0700
@@ -24,8 +24,8 @@
 
 # Rules to build jvm_db/dtrace, used by vm.make
 
-# we build libjvm_dtrace/libjvm_db/dtrace for COMPILER1 and COMPILER2
-# but not for CORE configuration
+# We build libjvm_dtrace/libjvm_db/dtrace for COMPILER1 and COMPILER2
+# but not for CORE or KERNEL configurations.
 
 ifneq ("${TYPE}", "CORE")
 ifneq ("${TYPE}", "KERNEL")
@@ -37,12 +37,13 @@
 
 else
 
-
 JVM_DB = libjvm_db
-LIBJVM_DB = libjvm$(G_SUFFIX)_db.so
+LIBJVM_DB = libjvm_db.so
+LIBJVM_DB_G = libjvm$(G_SUFFIX)_db.so
 
 JVM_DTRACE = jvm_dtrace
-LIBJVM_DTRACE = libjvm$(G_SUFFIX)_dtrace.so
+LIBJVM_DTRACE = libjvm_dtrace.so
+LIBJVM_DTRACE_G = libjvm$(G_SUFFIX)_dtrace.so
 
 JVMOFFS = JvmOffsets
 JVMOFFS.o = $(JVMOFFS).o
@@ -77,7 +78,7 @@
 LFLAGS_JVM_DTRACE += -D_REENTRANT $(PICFLAG)
 else
 LFLAGS_JVM_DB += -mt $(PICFLAG) -xnolib
-LFLAGS_JVM_DTRACE += -mt $(PICFLAG) -xnolib
+LFLAGS_JVM_DTRACE += -mt $(PICFLAG) -xnolib -ldl
 endif
 
 ISA = $(subst i386,i486,$(shell isainfo -n))
@@ -86,18 +87,24 @@
 ifneq ("${ISA}","${BUILDARCH}")
 
 XLIBJVM_DB = 64/$(LIBJVM_DB)
+XLIBJVM_DB_G = 64/$(LIBJVM_DB_G)
 XLIBJVM_DTRACE = 64/$(LIBJVM_DTRACE)
+XLIBJVM_DTRACE_G = 64/$(LIBJVM_DTRACE_G)
 
 $(XLIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS).h $(LIBJVM_DB_MAPFILE)
 	@echo Making $@
 	$(QUIETLY) mkdir -p 64/ ; \
 	$(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. -I$(GENERATED) \
 		$(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -lc
+	[ -f $(XLIBJVM_DB_G) ] || { ln -s $(LIBJVM_DB) $(XLIBJVM_DB_G); }
+
 $(XLIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
 	@echo Making $@
 	$(QUIETLY) mkdir -p 64/ ; \
 	$(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. \
 		$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor
+	[ -f $(XLIBJVM_DTRACE_G) ] || { ln -s $(LIBJVM_DTRACE) $(XLIBJVM_DTRACE_G); }
+
 endif # ifneq ("${ISA}","${BUILDARCH}")
 
 ifdef USE_GCC
@@ -142,11 +149,13 @@
 	@echo Making $@
 	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. -I$(GENERATED) \
 		$(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -lc
+	[ -f $(LIBJVM_DB_G) ] || { ln -s $@ $(LIBJVM_DB_G); }
 
 $(LIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(XLIBJVM_DTRACE) $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
 	@echo Making $@
 	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I.  \
 		$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor
+	[ -f $(LIBJVM_DTRACE_G) ] || { ln -s $@ $(LIBJVM_DTRACE_G); }
 
 $(DTRACE).d: $(DTRACE_SRCDIR)/hotspot.d $(DTRACE_SRCDIR)/hotspot_jni.d \
              $(DTRACE_SRCDIR)/hs_private.d $(DTRACE_SRCDIR)/jhelper.d
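
Two details worth noting in this file: the _g compatibility links for the 64-bit isadir builds are created relative to the 64/ directory (ln -s $(LIBJVM_DB) $(XLIBJVM_DB_G) yields 64/libjvm_db_g.so -> libjvm_db.so), and -ldl is added to LFLAGS_JVM_DTRACE in the Sun Studio (non-GCC) branch, presumably because libjvm_dtrace uses the dl* interfaces directly.
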
--- a/make/solaris/makefiles/fastdebug.make	Mon Mar 15 15:51:36 2010 -0400
+++ b/make/solaris/makefiles/fastdebug.make	Wed Mar 24 17:16:33 2010 -0700
@@ -90,7 +90,6 @@
 # for this method for now. (fix this when dtrace bug 6258412 is fixed)
 OPT_CFLAGS/ciEnv.o = $(OPT_CFLAGS) -xinline=no%__1cFciEnvbFpost_compiled_method_load_event6MpnHnmethod__v_
 
-
 # (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files)
 
 # If you set HOTSPARC_GENERIC=yes, you disable all OPT_CFLAGS settings
@@ -115,8 +114,7 @@
 # and mustn't be otherwise.
 MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE)
 
-
-G_SUFFIX =
+G_SUFFIX = _g
 VERSION = optimized
 SYSDEFS += -DASSERT -DFASTDEBUG -DCHECK_UNHANDLED_OOPS
 PICFLAGS = DEFAULT
--- a/make/solaris/makefiles/jsig.make	Mon Mar 15 15:51:36 2010 -0400
+++ b/make/solaris/makefiles/jsig.make	Wed Mar 24 17:16:33 2010 -0700
@@ -25,8 +25,11 @@
 # Rules to build signal interposition library, used by vm.make
 
 # libjsig[_g].so: signal interposition library
-JSIG = jsig$(G_SUFFIX)
-LIBJSIG = lib$(JSIG).so
+JSIG      = jsig
+LIBJSIG   = lib$(JSIG).so
+
+JSIG_G    = $(JSIG)$(G_SUFFIX)
+LIBJSIG_G = lib$(JSIG_G).so
 
 JSIGSRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/vm
 
@@ -46,6 +49,7 @@
 	@echo Making signal interposition lib...
 	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
                          $(LFLAGS_JSIG) -o $@ $< -ldl
+	[ -f $(LIBJSIG_G) ] || { ln -s $@ $(LIBJSIG_G); }
 
 install_jsig: $(LIBJSIG)
 	@echo "Copying $(LIBJSIG) to $(DEST_JSIG)"
--- a/make/solaris/makefiles/jvmg.make	Mon Mar 15 15:51:36 2010 -0400
+++ b/make/solaris/makefiles/jvmg.make	Wed Mar 24 17:16:33 2010 -0700
@@ -51,7 +51,7 @@
 # and mustn't be otherwise.
 MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE)
 
-G_SUFFIX =
+G_SUFFIX = _g
 VERSION = debug
 SYSDEFS += -DASSERT -DDEBUG
 PICFLAGS = DEFAULT
--- a/make/solaris/makefiles/launcher.make	Mon Mar 15 15:51:36 2010 -0400
+++ b/make/solaris/makefiles/launcher.make	Wed Mar 24 17:16:33 2010 -0700
@@ -25,7 +25,8 @@
 # Rules to build gamma launcher, used by vm.make
 
 # gamma[_g]: launcher
-LAUNCHER = gamma$(G_SUFFIX)
+LAUNCHER   = gamma
+LAUNCHER_G = $(LAUNCHER)$(G_SUFFIX)
 
 LAUNCHERDIR   = $(GAMMADIR)/src/os/$(Platform_os_family)/launcher
 LAUNCHERFLAGS = $(ARCHFLAG) \
@@ -88,5 +89,6 @@
 	    $(LINK_LAUNCHER/PRE_HOOK) \
 	    $(LINK_LAUNCHER) $(LFLAGS_LAUNCHER) -o $@ $(LAUNCHER.o) $(LIBS_LAUNCHER); \
 	    $(LINK_LAUNCHER/POST_HOOK) \
+	    [ -f $(LAUNCHER_G) ] || { ln -s $@ $(LAUNCHER_G); }; \
 	    ;; \
 	esac
--- a/make/solaris/makefiles/saproc.make	Mon Mar 15 15:51:36 2010 -0400
+++ b/make/solaris/makefiles/saproc.make	Wed Mar 24 17:16:33 2010 -0700
@@ -25,9 +25,13 @@
 # Rules to build serviceability agent library, used by vm.make
 
 # libsaproc[_g].so: serviceability agent
-SAPROC = saproc$(G_SUFFIX)
+
+SAPROC = saproc
 LIBSAPROC = lib$(SAPROC).so
 
+SAPROC_G = $(SAPROC)$(G_SUFFIX)
+LIBSAPROC_G = lib$(SAPROC_G).so
+
 AGENT_DIR = $(GAMMADIR)/agent
 
 SASRCDIR = $(AGENT_DIR)/src/os/$(Platform_os_family)/proc
@@ -69,6 +73,7 @@
 	           $(SA_LFLAGS)                                         \
 	           -o $@                                                \
 	           -ldl -ldemangle -lthread -lc
+	[ -f $(LIBSAPROC_G) ] || { ln -s $@ $(LIBSAPROC_G); }
 
 install_saproc: checkAndBuildSA
 	$(QUIETLY) if [ -f $(LIBSAPROC) ] ; then             \
--- a/make/solaris/makefiles/sparcWorks.make	Mon Mar 15 15:51:36 2010 -0400
+++ b/make/solaris/makefiles/sparcWorks.make	Wed Mar 24 17:16:33 2010 -0700
@@ -281,8 +281,6 @@
 OPT_CFLAGS=-xO4 $(EXTRA_OPT_CFLAGS)
 endif
 
-CFLAGS += $(GAMMADIR)/src/os_cpu/solaris_sparc/vm/solaris_sparc.il
-
 endif # sparc
 
 ifeq ("${Platform_arch_model}", "x86_32")
@@ -293,13 +291,14 @@
 # [phh] Is this still true for 6.1?
 OPT_CFLAGS+=-xO3
 
-CFLAGS += $(GAMMADIR)/src/os_cpu/solaris_x86/vm/solaris_x86_32.il
-
 endif # 32bit x86
 
 # no more exceptions
 CFLAGS/NOEX=-noex
 
+# Inline functions
+CFLAGS += $(GAMMADIR)/src/os_cpu/solaris_${Platform_arch}/vm/solaris_${Platform_arch_model}.il
+
 # Reduce code bloat by reverting back to 5.0 behavior for static initializers
 CFLAGS += -Qoption ccfe -one_static_init
 
@@ -312,6 +311,15 @@
 PICFLAG/BETTER  = $(PICFLAG/DEFAULT)
 PICFLAG/BYFILE  = $(PICFLAG/$@)$(PICFLAG/DEFAULT$(PICFLAG/$@))
 
+# Use $(MAPFLAG:FILENAME=real_file_name) to specify a map file.
+MAPFLAG = -M FILENAME
+
+# Use $(SONAMEFLAG:SONAME=soname) to specify the intrinsic name of a shared obj
+SONAMEFLAG = -h SONAME
+
+# Build shared library
+SHARED_FLAG = -G
+
 # Would be better if these weren't needed, since we link with CC, but
 # at present removing them causes run-time errors
 LFLAGS += -library=Crun
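
The two arch-specific .il additions removed above are replaced by one parameterized line, which also covers the 64-bit variants that previously got no inliner file; for example, on amd64 it expands to $(GAMMADIR)/src/os_cpu/solaris_x86/vm/solaris_x86_64.il. The new MAPFLAG, SONAMEFLAG, and SHARED_FLAG macros give the other makefiles one place to spell map files, sonames, and -G (build shared object) for this compiler.
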
--- a/make/solaris/makefiles/vm.make	Mon Mar 15 15:51:36 2010 -0400
+++ b/make/solaris/makefiles/vm.make	Wed Mar 24 17:16:33 2010 -0700
@@ -108,11 +108,16 @@
 #   older libm before libCrun, just to make sure it's found and used first.
 LIBS += -lsocket -lsched -ldl $(LIBM) -lCrun -lthread -ldoor -lc
 else
+ifeq ($(COMPILER_REV_NUMERIC), 502)
+# SC6.1 has it's own libm.so: specifying anything else provokes a name conflict.
+LIBS += -ldl -lthread -lsocket -lm -lsched -ldoor
+else
 LIBS += -ldl -lthread -lsocket $(LIBM) -lsched -ldoor
-endif
+endif # 502
+endif # 505
 else
 LIBS += -lsocket -lsched -ldl $(LIBM) -lthread -lc
-endif
+endif # sparcWorks
 
 # By default, link the *.o into the library, not the executable.
 LINK_INTO$(LINK_INTO) = LIBJVM
@@ -126,8 +131,9 @@
 #----------------------------------------------------------------------
 # JVM
 
-JVM    = jvm$(G_SUFFIX)
-LIBJVM = lib$(JVM).so
+JVM      = jvm
+LIBJVM   = lib$(JVM).so
+LIBJVM_G = lib$(JVM)$(G_SUFFIX).so
 
 JVM_OBJ_FILES = $(Obj_Files) $(DTRACE_OBJS)
 
@@ -173,11 +179,12 @@
 	-sbfast|-xsbfast) \
 	    ;; \
 	*) \
-	    echo Linking vm...;                                                  \
-	    $(LINK_LIB.CC/PRE_HOOK)                                              \
-	    $(LINK_VM) $(LFLAGS_VM) -o $@ $(LIBJVM.o) $(LIBS_VM);                \
-	    $(LINK_LIB.CC/POST_HOOK)                                             \
-	    rm -f $@.1; ln -s $@ $@.1;                                           \
+	    echo Linking vm...; \
+	    $(LINK_LIB.CC/PRE_HOOK) \
+	    $(LINK_VM) $(LFLAGS_VM) -o $@ $(LIBJVM.o) $(LIBS_VM); \
+	    $(LINK_LIB.CC/POST_HOOK) \
+	    rm -f $@.1; ln -s $@ $@.1; \
+	    [ -f $(LIBJVM_G) ] || { ln -s $@ $(LIBJVM_G); ln -s $@.1 $(LIBJVM_G).1; }; \
 	    ;; \
 	esac
 
--- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -189,14 +189,17 @@
   Register OSR_buf = osrBufferPointer()->as_register();
   { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
     int monitor_offset = BytesPerWord * method()->max_locals() +
-      (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
+      (2 * BytesPerWord) * (number_of_locks - 1);
+    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
+    // the OSR buffer using 2 word entries: first the lock and then
+    // the oop.
     for (int i = 0; i < number_of_locks; i++) {
-      int slot_offset = monitor_offset - ((i * BasicObjectLock::size()) * BytesPerWord);
+      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
 #ifdef ASSERT
       // verify the interpreter's monitor has a non-null object
       {
         Label L;
-        __ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes(), O7);
+        __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
         __ cmp(G0, O7);
         __ br(Assembler::notEqual, false, Assembler::pt, L);
         __ delayed()->nop();
@@ -205,9 +208,9 @@
       }
 #endif // ASSERT
       // Copy the lock field into the compiled activation.
-      __ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::lock_offset_in_bytes(), O7);
+      __ ld_ptr(OSR_buf, slot_offset + 0, O7);
       __ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
-      __ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes(), O7);
+      __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
       __ st_ptr(O7, frame_map()->address_for_monitor_object(i));
     }
   }
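
The layout change here (and in the matching x86 hunk later in this changeset) is easiest to see as plain arithmetic. A sketch of the offsets the new code assumes, with max_locals standing in for method()->max_locals():

    // SharedRuntime::OSR_migration_begin() packs each interpreter
    // monitor into the OSR buffer as two words:
    //   word 0: the BasicLock (displaced header)
    //   word 1: the object oop
    int monitor_offset = BytesPerWord * max_locals
                       + (2 * BytesPerWord) * (number_of_locks - 1);
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - (i * 2) * BytesPerWord;
      int lock_off    = slot_offset + 0;                 // -> monitor lock slot
      int obj_off     = slot_offset + 1 * BytesPerWord;  // -> monitor object slot
    }

The previous code sized each entry as BasicObjectLock::size() words, which is no longer how the migration buffer is packed.
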
@@ -354,7 +357,7 @@
 }
 
 
-void LIR_Assembler::emit_exception_handler() {
+int LIR_Assembler::emit_exception_handler() {
   // if the last instruction is a call (typically to do a throw which
   // is coming at the end after block reordering) the return address
   // must still point into the code area in order to avoid assertion
@@ -370,15 +373,12 @@
   if (handler_base == NULL) {
     // not enough space left for the handler
     bailout("exception handler overflow");
-    return;
+    return -1;
   }
-#ifdef ASSERT
+
   int offset = code_offset();
-#endif // ASSERT
-  compilation()->offsets()->set_value(CodeOffsets::Exceptions, code_offset());
-
-
-  if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_exceptions()) {
+
+  if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_on_exceptions()) {
     __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
     __ delayed()->nop();
   }
@@ -387,11 +387,13 @@
   __ delayed()->nop();
   debug_only(__ stop("should have gone to the caller");)
   assert(code_offset() - offset <= exception_handler_size, "overflow");
-
   __ end_a_stub();
+
+  return offset;
 }
 
-void LIR_Assembler::emit_deopt_handler() {
+
+int LIR_Assembler::emit_deopt_handler() {
   // if the last instruction is a call (typically to do a throw which
   // is coming at the end after block reordering) the return address
   // must still point into the code area in order to avoid assertion
@@ -405,23 +407,18 @@
   if (handler_base == NULL) {
     // not enough space left for the handler
     bailout("deopt handler overflow");
-    return;
+    return -1;
   }
-#ifdef ASSERT
+
   int offset = code_offset();
-#endif // ASSERT
-  compilation()->offsets()->set_value(CodeOffsets::Deopt, code_offset());
-
   AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
-
   __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
   __ delayed()->nop();
-
   assert(code_offset() - offset <= deopt_handler_size, "overflow");
-
   debug_only(__ stop("should have gone to the caller");)
-
   __ end_a_stub();
+
+  return offset;
 }
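
Both stub emitters change from void to int here (the x86 versions below get the identical treatment): instead of recording CodeOffsets::Exceptions / CodeOffsets::Deopt themselves, they hand the offset back and return -1 when the stub does not fit. In sketch form:

    int LIR_Assembler::emit_exception_handler(); // handler code offset, or -1 on bailout
    int LIR_Assembler::emit_deopt_handler();     // handler code offset, or -1 on bailout

Presumably the shared caller in the platform-independent c1 code now records these offsets; that caller is outside this excerpt.
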
 
 
@@ -953,9 +950,11 @@
         } else {
 #ifdef _LP64
           assert(base != to_reg->as_register_lo(), "can't handle this");
+          assert(O7 != to_reg->as_register_lo(), "can't handle this");
           __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
+          __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
           __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
-          __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
+          __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
 #else
           if (base == to_reg->as_register_lo()) {
             __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
@@ -976,8 +975,8 @@
           FloatRegister reg = to_reg->as_double_reg();
           // split unaligned loads
           if (unaligned || PatchALot) {
-            __ ldf(FloatRegisterImpl::S, base, offset + BytesPerWord, reg->successor());
-            __ ldf(FloatRegisterImpl::S, base, offset,                reg);
+            __ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor());
+            __ ldf(FloatRegisterImpl::S, base, offset,     reg);
           } else {
             __ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg());
           }
@@ -2200,6 +2199,7 @@
   Register len     = O2;
 
   __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
+  LP64_ONLY(__ sra(src_pos, 0, src_pos);) //higher 32bits must be null
   if (shift == 0) {
     __ add(src_ptr, src_pos, src_ptr);
   } else {
@@ -2208,6 +2208,7 @@
   }
 
   __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
+  LP64_ONLY(__ sra(dst_pos, 0, dst_pos);) //higher 32bits must be null
   if (shift == 0) {
     __ add(dst_ptr, dst_pos, dst_ptr);
   } else {
@@ -2729,9 +2730,6 @@
   }
 
   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
-  __ lduw(counter_addr, tmp1);
-  __ add(tmp1, DataLayout::counter_increment, tmp1);
-  __ stw(tmp1, counter_addr);
   Bytecodes::Code bc = method->java_code_at_bci(bci);
   // Perform additional virtual call profiling for invokevirtual and
   // invokeinterface bytecodes
@@ -2821,15 +2819,23 @@
         __ set(DataLayout::counter_increment, tmp1);
         __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
                   mdo_offset_bias);
-        if (i < (VirtualCallData::row_limit() - 1)) {
-          __ br(Assembler::always, false, Assembler::pt, update_done);
-          __ delayed()->nop();
-        }
+        __ br(Assembler::always, false, Assembler::pt, update_done);
+        __ delayed()->nop();
         __ bind(next_test);
       }
+      // Receiver did not match any saved receiver and there is no empty row for it.
+      // Increment total counter to indicate polymorphic case.
+      __ lduw(counter_addr, tmp1);
+      __ add(tmp1, DataLayout::counter_increment, tmp1);
+      __ stw(tmp1, counter_addr);
 
       __ bind(update_done);
     }
+  } else {
+    // Static call
+    __ lduw(counter_addr, tmp1);
+    __ add(tmp1, DataLayout::counter_increment, tmp1);
+    __ stw(tmp1, counter_addr);
   }
 }
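
The counter motion above changes when the aggregate VirtualCallData count is bumped. In outline (the interpreter-side mirror of this change is in interp_masm_sparc.cpp below):

    // receiver matches saved row i   -> increment row i's count only
    // receiver fits an empty row     -> claim the row, seed its count
    // no match and no empty row      -> increment the total counter
    //                                   (polymorphic case)
    // static call (no receiver data) -> increment the total counter

so a monomorphic or low-degree polymorphic site no longer counts every call in both a receiver row and the total.
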
 
--- a/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -144,17 +144,17 @@
   if (index->is_register()) {
     // apply the shift and accumulate the displacement
     if (shift > 0) {
-      LIR_Opr tmp = new_register(T_INT);
+      LIR_Opr tmp = new_pointer_register();
       __ shift_left(index, shift, tmp);
       index = tmp;
     }
     if (disp != 0) {
-      LIR_Opr tmp = new_register(T_INT);
+      LIR_Opr tmp = new_pointer_register();
       if (Assembler::is_simm13(disp)) {
-        __ add(tmp, LIR_OprFact::intConst(disp), tmp);
+        __ add(tmp, LIR_OprFact::intptrConst(disp), tmp);
         index = tmp;
       } else {
-        __ move(LIR_OprFact::intConst(disp), tmp);
+        __ move(LIR_OprFact::intptrConst(disp), tmp);
         __ add(tmp, index, tmp);
         index = tmp;
       }
@@ -162,8 +162,8 @@
     }
   } else if (disp != 0 && !Assembler::is_simm13(disp)) {
     // index is illegal so replace it with the displacement loaded into a register
-    index = new_register(T_INT);
-    __ move(LIR_OprFact::intConst(disp), index);
+    index = new_pointer_register();
+    __ move(LIR_OprFact::intptrConst(disp), index);
     disp = 0;
   }
 
--- a/src/cpu/sparc/vm/c1_globals_sparc.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/sparc/vm/c1_globals_sparc.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -22,10 +22,9 @@
  *
  */
 
-//
 // Sets the default values for platform dependent flags used by the client compiler.
 // (see c1_globals.hpp)
-//
+
 #ifndef TIERED
 define_pd_global(bool, BackgroundCompilation,        true );
 define_pd_global(bool, CICompileOSR,                 true );
@@ -48,27 +47,24 @@
 define_pd_global(bool, UseTLAB,                      true );
 define_pd_global(bool, ProfileInterpreter,           false);
 define_pd_global(intx, FreqInlineSize,               325  );
-define_pd_global(intx, NewRatio,                     8    ); // Design center runs on 1.3.1
 define_pd_global(bool, ResizeTLAB,                   true );
 define_pd_global(intx, ReservedCodeCacheSize,        32*M );
 define_pd_global(intx, CodeCacheExpansionSize,       32*K );
 define_pd_global(uintx,CodeCacheMinBlockLength,      1);
-define_pd_global(uintx, PermSize,                    12*M );
-define_pd_global(uintx, MaxPermSize,                 64*M );
-define_pd_global(bool, NeverActAsServerClassMachine, true);
+define_pd_global(uintx,PermSize,                     12*M );
+define_pd_global(uintx,MaxPermSize,                  64*M );
+define_pd_global(bool, NeverActAsServerClassMachine, true );
 define_pd_global(intx, NewSizeThreadIncrease,        16*K );
-define_pd_global(uintx, DefaultMaxRAM,               1*G);
+define_pd_global(uint64_t,MaxRAM,                    1ULL*G);
 define_pd_global(intx, InitialCodeCacheSize,         160*K);
-#endif // TIERED
+#endif // !TIERED
 
 define_pd_global(bool, UseTypeProfile,               false);
 define_pd_global(bool, RoundFPResults,               false);
 
-
-define_pd_global(bool, LIRFillDelaySlots,            true);
+define_pd_global(bool, LIRFillDelaySlots,            true );
 define_pd_global(bool, OptimizeSinglePrecision,      false);
-define_pd_global(bool, CSEArrayLength,               true);
+define_pd_global(bool, CSEArrayLength,               true );
 define_pd_global(bool, TwoOperandLIRForm,            false);
 
-
-define_pd_global(intx, SafepointPollOffset, 0);
+define_pd_global(intx, SafepointPollOffset,          0    );
--- a/src/cpu/sparc/vm/c2_globals_sparc.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/sparc/vm/c2_globals_sparc.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -59,7 +59,6 @@
 define_pd_global(intx, FreqInlineSize,               175);
 define_pd_global(intx, INTPRESSURE,                  48);  // large register set
 define_pd_global(intx, InteriorEntryAlignment,       16);  // = CodeEntryAlignment
-define_pd_global(intx, NewRatio,                     2);
 define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
 // The default setting 16/16 seems to work best.
 // (For _228_jack 16/16 is 2% better than 4/4, 16/4, 32/32, 32/16, or 16/32.)
@@ -83,25 +82,25 @@
 // sequence of instructions to load a 64 bit pointer.
 //
 // InitialCodeCacheSize derived from specjbb2000 run.
-define_pd_global(intx, InitialCodeCacheSize,     2048*K); // Integral multiple of CodeCacheExpansionSize
-define_pd_global(intx, ReservedCodeCacheSize,    48*M);
-define_pd_global(intx, CodeCacheExpansionSize,   64*K);
+define_pd_global(intx, InitialCodeCacheSize,         2048*K); // Integral multiple of CodeCacheExpansionSize
+define_pd_global(intx, ReservedCodeCacheSize,        48*M);
+define_pd_global(intx, CodeCacheExpansionSize,       64*K);
 
 // Ergonomics related flags
-define_pd_global(uintx, DefaultMaxRAM,           32*G);
+define_pd_global(uint64_t,MaxRAM,                    128ULL*G);
 #else
 // InitialCodeCacheSize derived from specjbb2000 run.
-define_pd_global(intx, InitialCodeCacheSize,     1536*K); // Integral multiple of CodeCacheExpansionSize
-define_pd_global(intx, ReservedCodeCacheSize,    32*M);
-define_pd_global(intx, CodeCacheExpansionSize,   32*K);
+define_pd_global(intx, InitialCodeCacheSize,         1536*K); // Integral multiple of CodeCacheExpansionSize
+define_pd_global(intx, ReservedCodeCacheSize,        32*M);
+define_pd_global(intx, CodeCacheExpansionSize,       32*K);
 // Ergonomics related flags
-define_pd_global(uintx, DefaultMaxRAM,           1*G);
+define_pd_global(uint64_t,MaxRAM,                    4ULL*G);
 #endif
-define_pd_global(uintx,CodeCacheMinBlockLength,  4);
+define_pd_global(uintx,CodeCacheMinBlockLength,      4);
 
 // Heap related flags
-define_pd_global(uintx, PermSize,    ScaleForWordSize(16*M));
-define_pd_global(uintx, MaxPermSize, ScaleForWordSize(64*M));
+define_pd_global(uintx,PermSize,    ScaleForWordSize(16*M));
+define_pd_global(uintx,MaxPermSize, ScaleForWordSize(64*M));
 
 // Ergonomics related flags
 define_pd_global(bool, NeverActAsServerClassMachine, false);
--- a/src/cpu/sparc/vm/frame_sparc.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/sparc/vm/frame_sparc.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -366,8 +366,9 @@
   // as get_original_pc() needs correct value for unextended_sp()
   if (_pc != NULL) {
     _cb = CodeCache::find_blob(_pc);
-    if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
-      _pc = ((nmethod*)_cb)->get_original_pc(this);
+    address original_pc = nmethod::get_deopt_original_pc(this);
+    if (original_pc != NULL) {
+      _pc = original_pc;
       _deopt_state = is_deoptimized;
     } else {
       _deopt_state = not_deoptimized;
@@ -519,9 +520,9 @@
   _cb = CodeCache::find_blob(pc);
   *O7_addr() = pc - pc_return_offset;
   _cb = CodeCache::find_blob(_pc);
-  if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
-    address orig = ((nmethod*)_cb)->get_original_pc(this);
-    assert(orig == _pc, "expected original to be stored before patching");
+  address original_pc = nmethod::get_deopt_original_pc(this);
+  if (original_pc != NULL) {
+    assert(original_pc == _pc, "expected original to be stored before patching");
     _deopt_state = is_deoptimized;
   } else {
     _deopt_state = not_deoptimized;
--- a/src/cpu/sparc/vm/globals_sparc.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/sparc/vm/globals_sparc.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -22,10 +22,8 @@
  *
  */
 
-//
 // Sets the default values for platform dependent flags used by the runtime system.
 // (see globals.hpp)
-//
 
 // For sparc we do not do call backs when a thread is in the interpreter, because the
 // interpreter dispatch needs at least two instructions - first to load the dispatch address
@@ -41,26 +39,23 @@
 define_pd_global(bool, ImplicitNullChecks,          true);  // Generate code for implicit null checks
 define_pd_global(bool, UncommonNullCast,            true);  // Uncommon-trap NULLs past to check cast
 
-define_pd_global(intx,  CodeEntryAlignment,    32);
-define_pd_global(uintx, TLABSize,              0);
-define_pd_global(uintx, NewSize, ScaleForWordSize((2048 * K) + (2 * (64 * K))));
-define_pd_global(intx,  SurvivorRatio,         8);
-define_pd_global(intx,  InlineFrequencyCount,  50);  // we can use more inlining on the SPARC
-define_pd_global(intx,  InlineSmallCode,       1500);
+define_pd_global(intx, CodeEntryAlignment,    32);
+define_pd_global(intx, InlineFrequencyCount,  50);  // we can use more inlining on the SPARC
+define_pd_global(intx, InlineSmallCode,       1500);
 #ifdef _LP64
 // Stack slots are 2X larger in LP64 than in the 32 bit VM.
-define_pd_global(intx,  ThreadStackSize,       1024);
-define_pd_global(intx,  VMThreadStackSize,     1024);
+define_pd_global(intx, ThreadStackSize,       1024);
+define_pd_global(intx, VMThreadStackSize,     1024);
 #else
-define_pd_global(intx,  ThreadStackSize,       512);
-define_pd_global(intx,  VMThreadStackSize,     512);
+define_pd_global(intx, ThreadStackSize,       512);
+define_pd_global(intx, VMThreadStackSize,     512);
 #endif
 
 define_pd_global(intx, StackYellowPages, 2);
 define_pd_global(intx, StackRedPages, 1);
 define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+1));
 
-define_pd_global(intx,  PreInflateSpin,        40);  // Determined by running design center
+define_pd_global(intx, PreInflateSpin,       40);  // Determined by running design center
 
 define_pd_global(bool, RewriteBytecodes,     true);
 define_pd_global(bool, RewriteFrequentPairs, true);
--- a/src/cpu/sparc/vm/interp_masm_sparc.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/sparc/vm/interp_masm_sparc.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1681,11 +1681,8 @@
     // If no method data exists, go to profile_continue.
     test_method_data_pointer(profile_continue);
 
-    // We are making a call.  Increment the count.
-    increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
-
     // Record the receiver type.
-    record_klass_in_profile(receiver, scratch);
+    record_klass_in_profile(receiver, scratch, true);
 
     // The method data pointer needs to be updated to reflect the new target.
     update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
@@ -1695,9 +1692,13 @@
 
 void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                         Register receiver, Register scratch,
-                                        int start_row, Label& done) {
-  if (TypeProfileWidth == 0)
+                                        int start_row, Label& done, bool is_virtual_call) {
+  if (TypeProfileWidth == 0) {
+    if (is_virtual_call) {
+      increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
+    }
     return;
+  }
 
   int last_row = VirtualCallData::row_limit() - 1;
   assert(start_row <= last_row, "must be work left to do");
@@ -1714,6 +1715,7 @@
     // See if the receiver is receiver[n].
     int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
     test_mdp_data_at(recvr_offset, receiver, next_test, scratch);
+    // delayed()->tst(scratch);
 
     // The receiver is receiver[n].  Increment count[n].
     int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
@@ -1723,20 +1725,31 @@
     bind(next_test);
 
     if (test_for_null_also) {
+      Label found_null;
       // Failed the equality check on receiver[n]...  Test for null.
       if (start_row == last_row) {
         // The only thing left to do is handle the null case.
-        brx(Assembler::notZero, false, Assembler::pt, done);
-        delayed()->nop();
+        if (is_virtual_call) {
+          brx(Assembler::zero, false, Assembler::pn, found_null);
+          delayed()->nop();
+          // Receiver did not match any saved receiver and there is no empty row for it.
+          // Increment total counter to indicate polymorphic case.
+          increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
+          ba(false, done);
+          delayed()->nop();
+          bind(found_null);
+        } else {
+          brx(Assembler::notZero, false, Assembler::pt, done);
+          delayed()->nop();
+        }
         break;
       }
       // Since null is rare, make it be the branch-taken case.
-      Label found_null;
       brx(Assembler::zero, false, Assembler::pn, found_null);
       delayed()->nop();
 
       // Put all the "Case 3" tests here.
-      record_klass_in_profile_helper(receiver, scratch, start_row + 1, done);
+      record_klass_in_profile_helper(receiver, scratch, start_row + 1, done, is_virtual_call);
 
       // Found a null.  Keep searching for a matching receiver,
       // but remember that this is an empty (unused) slot.
@@ -1753,16 +1766,18 @@
   int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
   mov(DataLayout::counter_increment, scratch);
   set_mdp_data_at(count_offset, scratch);
-  ba(false, done);
-  delayed()->nop();
+  if (start_row > 0) {
+    ba(false, done);
+    delayed()->nop();
+  }
 }
 
 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
-                                                        Register scratch) {
+                                                        Register scratch, bool is_virtual_call) {
   assert(ProfileInterpreter, "must be profiling");
   Label done;
 
-  record_klass_in_profile_helper(receiver, scratch, 0, done);
+  record_klass_in_profile_helper(receiver, scratch, 0, done, is_virtual_call);
 
   bind (done);
 }
@@ -1840,7 +1855,7 @@
       mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
 
       // Record the object type.
-      record_klass_in_profile(klass, scratch);
+      record_klass_in_profile(klass, scratch, false);
     }
 
     // The method data pointer needs to be updated.
--- a/src/cpu/sparc/vm/interp_masm_sparc.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/sparc/vm/interp_masm_sparc.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -290,9 +290,9 @@
   void test_mdp_data_at(int offset, Register value, Label& not_equal_continue,
                         Register scratch);
 
-  void record_klass_in_profile(Register receiver, Register scratch);
+  void record_klass_in_profile(Register receiver, Register scratch, bool is_virtual_call);
   void record_klass_in_profile_helper(Register receiver, Register scratch,
-                                      int start_row, Label& done);
+                                      int start_row, Label& done, bool is_virtual_call);
 
   void update_mdp_by_offset(int offset_of_disp, Register scratch);
   void update_mdp_by_offset(Register reg, int offset_of_disp,
--- a/src/cpu/sparc/vm/interpreter_sparc.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/sparc/vm/interpreter_sparc.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -394,6 +394,11 @@
 }
 
 
+bool AbstractInterpreter::can_be_compiled(methodHandle m) {
+  // No special entry points that preclude compilation
+  return true;
+}
+
 // This method tells the deoptimizer how big an interpreted frame must be:
 int AbstractInterpreter::size_activation(methodOop method,
                                          int tempcount,
--- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -851,10 +851,10 @@
       __ set(reg2offset(r_1) + extraspace + bias, ld_off);
 #else
       int ld_off = reg2offset(r_1) + extraspace + bias;
+#endif // _LP64
 #ifdef ASSERT
       G1_forced = true;
 #endif // ASSERT
-#endif // _LP64
       r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
       if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
       else                  __ ldx(base, ld_off, G1_scratch);
@@ -865,9 +865,11 @@
       if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
         store_c2i_object(r, base, st_off);
       } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
+#ifndef _LP64
         if (TieredCompilation) {
           assert(G1_forced || sig_bt[i] != T_LONG, "should not use register args for longs");
         }
+#endif // _LP64
         store_c2i_long(r, base, st_off, r_2->is_stack());
       } else {
         store_c2i_int(r, base, st_off);
@@ -1189,7 +1191,8 @@
                                                             // VMReg max_arg,
                                                             int comp_args_on_stack, // VMRegStackSlots
                                                             const BasicType *sig_bt,
-                                                            const VMRegPair *regs) {
+                                                            const VMRegPair *regs,
+                                                            AdapterFingerPrint* fingerprint) {
   address i2c_entry = __ pc();
 
   AdapterGenerator agen(masm);
@@ -1258,7 +1261,7 @@
   agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
 
   __ flush();
-  return new AdapterHandlerEntry(i2c_entry, c2i_entry, c2i_unverified_entry);
+  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
 
 }
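
generate_i2c2i_adapters now receives an AdapterFingerPrint and returns an entry minted by AdapterHandlerLibrary::new_entry rather than constructing an AdapterHandlerEntry directly, which suggests the library is taking over adapter bookkeeping so adapters can be looked up (and shared) by signature fingerprint; the library side of that change is not part of this excerpt.
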
 
--- a/src/cpu/sparc/vm/sparc.ad	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/sparc/vm/sparc.ad	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 //
-// Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
+// Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -1885,6 +1885,10 @@
   return RegMask();
 }
 
+const RegMask Matcher::method_handle_invoke_SP_save_mask() {
+  return RegMask();
+}
+
 %}
 
 
@@ -6664,7 +6668,7 @@
   ins_pipe(ialu_imm);
 %}
 
-instruct cmovII_U_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{
+instruct cmovIIu_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{
   match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
   ins_cost(150);
   size(4);
@@ -6673,7 +6677,7 @@
   ins_pipe(ialu_reg);
 %}
 
-instruct cmovII_U_imm(cmpOpU cmp, flagsRegU icc, iRegI dst, immI11 src) %{
+instruct cmovIIu_imm(cmpOpU cmp, flagsRegU icc, iRegI dst, immI11 src) %{
   match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
   ins_cost(140);
   size(4);
@@ -6719,6 +6723,16 @@
   ins_pipe(ialu_reg);
 %}
 
+// This instruction also works with CmpN so we don't need cmovNN_reg.
+instruct cmovNIu_reg(cmpOpU cmp, flagsRegU icc, iRegN dst, iRegN src) %{
+  match(Set dst (CMoveN (Binary cmp icc) (Binary dst src)));
+  ins_cost(150);
+  size(4);
+  format %{ "MOV$cmp  $icc,$src,$dst" %}
+  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
+  ins_pipe(ialu_reg);
+%}
+
 instruct cmovNF_reg(cmpOpF cmp, flagsRegF fcc, iRegN dst, iRegN src) %{
   match(Set dst (CMoveN (Binary cmp fcc) (Binary dst src)));
   ins_cost(150);
@@ -6756,6 +6770,16 @@
   ins_pipe(ialu_reg);
 %}
 
+instruct cmovPIu_reg(cmpOpU cmp, flagsRegU icc, iRegP dst, iRegP src) %{
+  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
+  ins_cost(150);
+
+  size(4);
+  format %{ "MOV$cmp  $icc,$src,$dst\t! ptr" %}
+  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
+  ins_pipe(ialu_reg);
+%}
+
 instruct cmovPI_imm(cmpOp cmp, flagsReg icc, iRegP dst, immP0 src) %{
   match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
   ins_cost(140);
@@ -6766,6 +6790,16 @@
   ins_pipe(ialu_imm);
 %}
 
+instruct cmovPIu_imm(cmpOpU cmp, flagsRegU icc, iRegP dst, immP0 src) %{
+  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
+  ins_cost(140);
+
+  size(4);
+  format %{ "MOV$cmp  $icc,$src,$dst\t! ptr" %}
+  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
+  ins_pipe(ialu_imm);
+%}
+
 instruct cmovPF_reg(cmpOpF cmp, flagsRegF fcc, iRegP dst, iRegP src) %{
   match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src)));
   ins_cost(150);
@@ -6805,6 +6839,17 @@
   ins_pipe(int_conditional_float_move);
 %}
 
+instruct cmovFIu_reg(cmpOpU cmp, flagsRegU icc, regF dst, regF src) %{
+  match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
+  ins_cost(150);
+
+  size(4);
+  format %{ "FMOVS$cmp $icc,$src,$dst" %}
+  opcode(0x101);
+  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
+  ins_pipe(int_conditional_float_move);
+%}
+
 // Conditional move,
 instruct cmovFF_reg(cmpOpF cmp, flagsRegF fcc, regF dst, regF src) %{
   match(Set dst (CMoveF (Binary cmp fcc) (Binary dst src)));
@@ -6838,6 +6883,17 @@
   ins_pipe(int_conditional_double_move);
 %}
 
+instruct cmovDIu_reg(cmpOpU cmp, flagsRegU icc, regD dst, regD src) %{
+  match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
+  ins_cost(150);
+
+  size(4);
+  format %{ "FMOVD$cmp $icc,$src,$dst" %}
+  opcode(0x102);
+  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
+  ins_pipe(int_conditional_double_move);
+%}
+
 // Conditional move,
 instruct cmovDF_reg(cmpOpF cmp, flagsRegF fcc, regD dst, regD src) %{
   match(Set dst (CMoveD (Binary cmp fcc) (Binary dst src)));
@@ -6877,6 +6933,17 @@
 %}
 
 
+instruct cmovLIu_reg(cmpOpU cmp, flagsRegU icc, iRegL dst, iRegL src) %{
+  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
+  ins_cost(150);
+
+  size(4);
+  format %{ "MOV$cmp  $icc,$src,$dst\t! long" %}
+  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
+  ins_pipe(ialu_reg);
+%}
+
+
 instruct cmovLF_reg(cmpOpF cmp, flagsRegF fcc, iRegL dst, iRegL src) %{
   match(Set dst (CMoveL (Binary cmp fcc) (Binary dst src)));
   ins_cost(150);
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2862,6 +2862,9 @@
 
     // arraycopy stubs used by compilers
     generate_arraycopy_stubs();
+
+    // Don't initialize the platform math functions since sparc
+    // doesn't have intrinsics for these operations.
   }
 
 
--- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -150,8 +150,7 @@
 }
 
 
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, bool unbox) {
-  assert(!unbox, "NYI");//6815692//
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
   address compiled_entry = __ pc();
   Label cont;
 
--- a/src/cpu/x86/vm/assembler_x86.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -2251,6 +2251,7 @@
   emit_byte(0x9D);
 }
 
+#ifndef _LP64 // no 32bit push/pop on amd64
 void Assembler::popl(Address dst) {
   // NOTE: this will adjust stack by 8byte on 64bits
   InstructionMark im(this);
@@ -2258,6 +2259,7 @@
   emit_byte(0x8F);
   emit_operand(rax, dst);
 }
+#endif
 
 void Assembler::prefetch_prefix(Address src) {
   prefix(src);
@@ -2428,6 +2430,7 @@
   emit_byte(0x9C);
 }
 
+#ifndef _LP64 // no 32bit push/pop on amd64
 void Assembler::pushl(Address src) {
   // Note this will push 64bit on 64bit
   InstructionMark im(this);
@@ -2435,6 +2438,7 @@
   emit_byte(0xFF);
   emit_operand(rsi, src);
 }
+#endif
 
 void Assembler::pxor(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
@@ -5591,7 +5595,12 @@
 }
 
 void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
-  andpd(dst, as_Address(src));
+  if (reachable(src)) {
+    andpd(dst, as_Address(src));
+  } else {
+    lea(rscratch1, src);
+    andpd(dst, Address(rscratch1, 0));
+  }
 }
 
 void MacroAssembler::andptr(Register dst, int32_t imm32) {
@@ -6078,11 +6087,21 @@
 }
 
 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) {
-  comisd(dst, as_Address(src));
+  if (reachable(src)) {
+    comisd(dst, as_Address(src));
+  } else {
+    lea(rscratch1, src);
+    comisd(dst, Address(rscratch1, 0));
+  }
 }
 
 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) {
-  comiss(dst, as_Address(src));
+  if (reachable(src)) {
+    comiss(dst, as_Address(src));
+  } else {
+    lea(rscratch1, src);
+    comiss(dst, Address(rscratch1, 0));
+  }
 }
 
 
@@ -7647,7 +7666,7 @@
 
 #ifdef ASSERT
   Label L;
-  testl(tmp, tmp);
+  testptr(tmp, tmp);
   jccb(Assembler::notZero, L);
   hlt();
   bind(L);
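
The andpd/comisd/comiss changes above are all instances of one x86_64 idiom: a 64-bit AddressLiteral may fall outside RIP-relative range, so the operand is used directly only when reachable, and otherwise the address is materialized in rscratch1 first. Schematically, with op standing for any of the three instructions:

    if (reachable(src)) {
      op(dst, as_Address(src));        // RIP-relative operand is in range
    } else {
      lea(rscratch1, src);             // materialize the 64-bit address
      op(dst, Address(rscratch1, 0));  // then use an indirect operand
    }

On 32-bit builds every address is reachable, so the fast path is always taken.
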
--- a/src/cpu/x86/vm/assembler_x86.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/assembler_x86.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1244,7 +1244,9 @@
   void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
   void pcmpestri(XMMRegister xmm1, Address src, int imm8);
 
+#ifndef _LP64 // no 32bit push/pop on amd64
   void popl(Address dst);
+#endif
 
 #ifdef _LP64
   void popq(Address dst);
@@ -1285,7 +1287,9 @@
   // Interleave Low Bytes
   void punpcklbw(XMMRegister dst, XMMRegister src);
 
+#ifndef _LP64 // no 32bit push/pop on amd64
   void pushl(Address src);
+#endif
 
   void pushq(Address src);
 
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -301,22 +301,25 @@
   Register OSR_buf = osrBufferPointer()->as_pointer_register();
   { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
     int monitor_offset = BytesPerWord * method()->max_locals() +
-      (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
+      (2 * BytesPerWord) * (number_of_locks - 1);
+    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
+    // the OSR buffer using 2 word entries: first the lock and then
+    // the oop.
     for (int i = 0; i < number_of_locks; i++) {
-      int slot_offset = monitor_offset - ((i * BasicObjectLock::size()) * BytesPerWord);
+      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
 #ifdef ASSERT
       // verify the interpreter's monitor has a non-null object
       {
         Label L;
-        __ cmpptr(Address(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);
+        __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), (int32_t)NULL_WORD);
         __ jcc(Assembler::notZero, L);
         __ stop("locked object is NULL");
         __ bind(L);
       }
 #endif
-      __ movptr(rbx, Address(OSR_buf, slot_offset + BasicObjectLock::lock_offset_in_bytes()));
+      __ movptr(rbx, Address(OSR_buf, slot_offset + 0));
       __ movptr(frame_map()->address_for_monitor_lock(i), rbx);
-      __ movptr(rbx, Address(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes()));
+      __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
       __ movptr(frame_map()->address_for_monitor_object(i), rbx);
     }
   }
@@ -415,13 +418,12 @@
 }
 
 
-void LIR_Assembler::emit_exception_handler() {
+int LIR_Assembler::emit_exception_handler() {
   // if the last instruction is a call (typically to do a throw which
   // is coming at the end after block reordering) the return address
   // must still point into the code area in order to avoid assertion
   // failures when searching for the corresponding bci => add a nop
   // (was bug 5/14/1999 - gri)
-
   __ nop();
 
   // generate code for exception handler
@@ -429,17 +431,14 @@
   if (handler_base == NULL) {
     // not enough space left for the handler
     bailout("exception handler overflow");
-    return;
+    return -1;
   }
-#ifdef ASSERT
+
   int offset = code_offset();
-#endif // ASSERT
-
-  compilation()->offsets()->set_value(CodeOffsets::Exceptions, code_offset());
 
   // if the method does not have an exception handler, then there is
   // no reason to search for one
-  if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_exceptions()) {
+  if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_on_exceptions()) {
     // the exception oop and pc are in rax, and rdx
     // no other registers need to be preserved, so invalidate them
     __ invalidate_registers(false, true, true, false, true, true);
@@ -471,19 +470,19 @@
   // unwind activation and forward exception to caller
   // rax,: exception
   __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
-
   assert(code_offset() - offset <= exception_handler_size, "overflow");
-
   __ end_a_stub();
+
+  return offset;
 }
 
-void LIR_Assembler::emit_deopt_handler() {
+
+int LIR_Assembler::emit_deopt_handler() {
   // if the last instruction is a call (typically to do a throw which
   // is coming at the end after block reordering) the return address
   // must still point into the code area in order to avoid assertion
   // failures when searching for the corresponding bci => add a nop
   // (was bug 5/14/1999 - gri)
-
   __ nop();
 
   // generate code for exception handler
@@ -491,23 +490,17 @@
   if (handler_base == NULL) {
     // not enough space left for the handler
     bailout("deopt handler overflow");
-    return;
+    return -1;
   }
-#ifdef ASSERT
+
   int offset = code_offset();
-#endif // ASSERT
-
-  compilation()->offsets()->set_value(CodeOffsets::Deopt, code_offset());
-
   InternalAddress here(__ pc());
   __ pushptr(here.addr());
-
   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
-
   assert(code_offset() - offset <= deopt_handler_size, "overflow");
-
   __ end_a_stub();
 
+  return offset;
 }
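
[Editor's note: toy rendering with invented names. Both emit_exception_handler() and emit_deopt_handler() now return an int instead of void: the stub's start offset on success, or -1 when the code buffer is full, leaving it to the caller to record the offsets that used to be written into CodeOffsets here:]

    #include <cstdio>

    static int code_pos = 0;               // stand-in for code_offset()

    int emit_handler(bool enough_space) {
      if (!enough_space) return -1;        // bailout("... overflow")
      int offset = code_pos;               // where this stub starts
      code_pos += 32;                      // pretend 32 bytes were emitted
      return offset;                       // caller records it centrally
    }

    int main() {
      std::printf("exception handler at %d\n", emit_handler(true));
      std::printf("deopt handler at %d\n",     emit_handler(true));
      return 0;
    }
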
 
 
@@ -785,7 +778,13 @@
           ShouldNotReachHere();
           __ movoop(as_Address(addr, noreg), c->as_jobject());
         } else {
+#ifdef _LP64
+          __ movoop(rscratch1, c->as_jobject());
+          null_check_here = code_offset();
+          __ movptr(as_Address_lo(addr), rscratch1);
+#else
           __ movoop(as_Address(addr), c->as_jobject());
+#endif
         }
       }
       break;
@@ -1118,8 +1117,14 @@
       __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
       __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
     } else {
+#ifndef _LP64
       __ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
       __ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
+#else
+      // no pushl on 64-bit
+      __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
+      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
+#endif
     }
 
   } else if (src->is_double_stack()) {
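
[Editor's note: illustration only. amd64 has no 32-bit push/pop, so the single-stack move above becomes a load/store pair through a scratch register. The C equivalent of the two movl instructions, with hypothetical slot indices:]

    #include <cstdint>
    #include <cstdio>

    void move_slot(int32_t* frame, int src_ix, int dst_ix) {
      int32_t scratch = frame[src_ix];  // movl rscratch1, [src slot]
      frame[dst_ix] = scratch;          // movl [dst slot], rscratch1
    }

    int main() {
      int32_t frame[4] = {7, 0, 0, 0};
      move_slot(frame, 0, 3);
      std::printf("%d\n", frame[3]);    // prints 7
      return 0;
    }
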
@@ -3136,8 +3141,10 @@
 
 #ifdef _LP64
   assert_different_registers(c_rarg0, dst, dst_pos, length);
+  __ movl2ptr(src_pos, src_pos); // higher 32 bits must be zero
   __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
   assert_different_registers(c_rarg1, length);
+  __ movl2ptr(dst_pos, dst_pos); // higher 32 bits must be zero
   __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
   __ mov(c_rarg2, length);
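
[Editor's note: standalone demonstration with invented values. The two movl2ptr lines zero-extend the 32-bit position registers before they enter 64-bit address computation; stale upper bits would otherwise poison the scaled-index lea:]

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t reg = 0xdeadbeef00000002ULL;  // upper 32 bits are stale garbage
      uint32_t idx = (uint32_t)reg;          // the meaningful index is 2

      uint64_t base = 0x1000;
      uint64_t bad  = base + reg * 8;            // stale bits poison the address
      uint64_t good = base + (uint64_t)idx * 8;  // what movl2ptr guarantees

      std::printf("bad  = %#llx\ngood = %#llx\n",
                  (unsigned long long)bad, (unsigned long long)good);
      return 0;
    }
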
 
@@ -3202,7 +3209,6 @@
   Register mdo  = op->mdo()->as_register();
   __ movoop(mdo, md->constant_encoding());
   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
-  __ addl(counter_addr, DataLayout::counter_increment);
   Bytecodes::Code bc = method->java_code_at_bci(bci);
   // Perform additional virtual call profiling for invokevirtual and
   // invokeinterface bytecodes
@@ -3269,14 +3275,18 @@
         __ jcc(Assembler::notEqual, next_test);
         __ movptr(recv_addr, recv);
         __ movl(Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))), DataLayout::counter_increment);
-        if (i < (VirtualCallData::row_limit() - 1)) {
-          __ jmp(update_done);
-        }
+        __ jmp(update_done);
         __ bind(next_test);
       }
+      // Receiver did not match any saved receiver and there is no empty row for it.
+      // Increment total counter to indicate polymorphic case.
+      __ addl(counter_addr, DataLayout::counter_increment);
 
       __ bind(update_done);
     }
+  } else {
+    // Static call
+    __ addl(counter_addr, DataLayout::counter_increment);
   }
 }
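
[Editor's note: a C++ model, not HotSpot code. The two hunks above change what the shared call counter means: each profile row counts one observed receiver type, and the shared count is bumped only on row overflow (the polymorphic case) or for static calls, so the buckets now partition the calls:]

    #include <cstdio>

    struct CallProfile {
      static const int kRows = 2;     // stand-in for VirtualCallData::row_limit()
      const void* recv[kRows] = {};
      long row_count[kRows] = {};
      long count = 0;                 // stand-in for CounterData::count

      void record(const void* r) {
        for (int i = 0; i < kRows; i++) {
          if (recv[i] == r) { row_count[i]++; return; }  // known receiver: bump its row
          if (recv[i] == nullptr) {                      // empty row: claim it
            recv[i] = r; row_count[i] = 1; return;
          }
        }
        count++;  // no match and no free row: polymorphic overflow
      }
    };

    int main() {
      CallProfile p;
      int a, b, c;
      p.record(&a); p.record(&a); p.record(&b); p.record(&c);
      std::printf("rows: %ld %ld, overflow: %ld\n",
                  p.row_count[0], p.row_count[1], p.count);  // 2 1, 1
      return 0;
    }
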
 
--- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -755,8 +755,19 @@
   }
 
   LIR_Opr addr = new_pointer_register();
-  __ move(obj.result(), addr);
-  __ add(addr, offset.result(), addr);
+  LIR_Address* a;
+  if(offset.result()->is_constant()) {
+    a = new LIR_Address(obj.result(),
+                        NOT_LP64(offset.result()->as_constant_ptr()->as_jint()) LP64_ONLY((int)offset.result()->as_constant_ptr()->as_jlong()),
+                        as_BasicType(type));
+  } else {
+    a = new LIR_Address(obj.result(),
+                        offset.result(),
+                        LIR_Address::times_1,
+                        0,
+                        as_BasicType(type));
+  }
+  __ leal(LIR_OprFact::address(a), addr);
 
   if (type == objectType) {  // Write-barrier needed for Object fields.
     // Do the pre-write barrier, if any.
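
[Editor's note: illustration only. The LIRGenerator change folds the old move-then-add pair into one leal of a formed LIR_Address, so a constant offset becomes a plain displacement. The arithmetic an x86 lea encodes, in standalone form:]

    #include <cstdint>
    #include <cstdio>

    // base + index * scale + disp is exactly what an x86 lea encodes.
    uint64_t lea(uint64_t base, uint64_t index, int scale, int disp) {
      return base + index * scale + disp;
    }

    int main() {
      uint64_t obj = 0x2000;
      uint64_t off = 24;
      std::printf("%#llx\n", (unsigned long long)lea(obj, off, 1, 0));  // 0x2018
      return 0;
    }
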
--- a/src/cpu/x86/vm/c1_globals_x86.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/c1_globals_x86.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -22,10 +22,8 @@
  *
  */
 
-//
 // Sets the default values for platform dependent flags used by the client compiler.
 // (see c1_globals.hpp)
-//
 
 #ifndef TIERED
 define_pd_global(bool, BackgroundCompilation,        true );
@@ -48,27 +46,24 @@
 
 define_pd_global(intx, OnStackReplacePercentage,     933  );
 define_pd_global(intx, FreqInlineSize,               325  );
-define_pd_global(intx, NewRatio,                     12   );
 define_pd_global(intx, NewSizeThreadIncrease,        4*K  );
 define_pd_global(intx, InitialCodeCacheSize,         160*K);
 define_pd_global(intx, ReservedCodeCacheSize,        32*M );
 define_pd_global(bool, ProfileInterpreter,           false);
 define_pd_global(intx, CodeCacheExpansionSize,       32*K );
 define_pd_global(uintx,CodeCacheMinBlockLength,      1);
-define_pd_global(uintx, PermSize,                    12*M );
-define_pd_global(uintx, MaxPermSize,                 64*M );
-define_pd_global(bool, NeverActAsServerClassMachine, true);
-define_pd_global(uintx, DefaultMaxRAM,               1*G);
+define_pd_global(uintx,PermSize,                     12*M );
+define_pd_global(uintx,MaxPermSize,                  64*M );
+define_pd_global(bool, NeverActAsServerClassMachine, true );
+define_pd_global(uint64_t,MaxRAM,                    1ULL*G);
 define_pd_global(bool, CICompileOSR,                 true );
-#endif // TIERED
+#endif // !TIERED
 define_pd_global(bool, UseTypeProfile,               false);
 define_pd_global(bool, RoundFPResults,               true );
 
-
 define_pd_global(bool, LIRFillDelaySlots,            false);
-define_pd_global(bool, OptimizeSinglePrecision,      true);
+define_pd_global(bool, OptimizeSinglePrecision,      true );
 define_pd_global(bool, CSEArrayLength,               false);
-define_pd_global(bool, TwoOperandLIRForm,            true);
+define_pd_global(bool, TwoOperandLIRForm,            true );
 
-
-define_pd_global(intx, SafepointPollOffset, 256);
+define_pd_global(intx, SafepointPollOffset,          256  );
--- a/src/cpu/x86/vm/c2_globals_x86.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/c2_globals_x86.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -22,7 +22,6 @@
  *
  */
 
-//
 // Sets the default values for platform dependent flags used by the server compiler.
 // (see c2_globals.hpp).  Alpha-sorted.
 
@@ -46,8 +45,8 @@
 define_pd_global(intx, CompileThreshold,             10000);
 #endif // TIERED
 define_pd_global(intx, Tier2CompileThreshold,        10000);
-define_pd_global(intx, Tier3CompileThreshold,        20000 );
-define_pd_global(intx, Tier4CompileThreshold,        40000 );
+define_pd_global(intx, Tier3CompileThreshold,        20000);
+define_pd_global(intx, Tier4CompileThreshold,        40000);
 
 define_pd_global(intx, BackEdgeThreshold,            100000);
 define_pd_global(intx, Tier2BackEdgeThreshold,       100000);
@@ -61,7 +60,6 @@
 #ifdef AMD64
 define_pd_global(intx, INTPRESSURE,                  13);
 define_pd_global(intx, InteriorEntryAlignment,       16);
-define_pd_global(intx, NewRatio,                     2);
 define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
 define_pd_global(intx, LoopUnrollLimit,              60);
 // InitialCodeCacheSize derived from specjbb2000 run.
@@ -69,19 +67,18 @@
 define_pd_global(intx, CodeCacheExpansionSize,       64*K);
 
 // Ergonomics related flags
-define_pd_global(uintx, DefaultMaxRAM, 32*G);
+define_pd_global(uint64_t,MaxRAM,                    128ULL*G);
 #else
 define_pd_global(intx, INTPRESSURE,                  6);
 define_pd_global(intx, InteriorEntryAlignment,       4);
-define_pd_global(intx, NewRatio,                     8); // Design center runs on 1.3.1
 define_pd_global(intx, NewSizeThreadIncrease,        4*K);
-define_pd_global(intx, LoopUnrollLimit,              50); // Design center runs on 1.3.1
+define_pd_global(intx, LoopUnrollLimit,              50);     // Design center runs on 1.3.1
 // InitialCodeCacheSize derived from specjbb2000 run.
 define_pd_global(intx, InitialCodeCacheSize,         2304*K); // Integral multiple of CodeCacheExpansionSize
 define_pd_global(intx, CodeCacheExpansionSize,       32*K);
 
 // Ergonomics related flags
-define_pd_global(uintx, DefaultMaxRAM, 1*G);
+define_pd_global(uint64_t,MaxRAM,                    4ULL*G);
 #endif // AMD64
 define_pd_global(intx, OptoLoopAlignment,            16);
 define_pd_global(intx, RegisterCostAreaRatio,        16000);
@@ -97,8 +94,8 @@
 define_pd_global(uintx,CodeCacheMinBlockLength,      4);
 
 // Heap related flags
-define_pd_global(uintx, PermSize,    ScaleForWordSize(16*M));
-define_pd_global(uintx, MaxPermSize, ScaleForWordSize(64*M));
+define_pd_global(uintx,PermSize,    ScaleForWordSize(16*M));
+define_pd_global(uintx,MaxPermSize, ScaleForWordSize(64*M));
 
 // Ergonomics related flags
 define_pd_global(bool, NeverActAsServerClassMachine, false);
--- a/src/cpu/x86/vm/frame_x86.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/frame_x86.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -222,9 +222,9 @@
   }
   ((address *)sp())[-1] = pc;
   _cb = CodeCache::find_blob(pc);
-  if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
-    address orig = (((nmethod*)_cb)->get_original_pc(this));
-    assert(orig == _pc, "expected original to be stored before patching");
+  address original_pc = nmethod::get_deopt_original_pc(this);
+  if (original_pc != NULL) {
+    assert(original_pc == _pc, "expected original PC to be stored before patching");
     _deopt_state = is_deoptimized;
     // leave _pc as is
   } else {
@@ -323,13 +323,63 @@
   return fr;
 }
 
+
+//------------------------------------------------------------------------------
+// frame::verify_deopt_original_pc
+//
+// Verifies the calculated original PC of a deoptimization PC for the
+// given unextended SP.  The unextended SP might also be the saved SP
+// for MethodHandle call sites.
+#if ASSERT
+void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return) {
+  frame fr;
+
+  // This is ugly, but it's better than changing {get,set}_original_pc
+  // to take an SP value as an argument.  And it's only a debugging
+  // method anyway.
+  fr._unextended_sp = unextended_sp;
+
+  address original_pc = nm->get_original_pc(&fr);
+  assert(nm->code_contains(original_pc), "original PC must be in nmethod");
+  assert(nm->is_method_handle_return(original_pc) == is_method_handle_return, "must be");
+}
+#endif
+
+
+//------------------------------------------------------------------------------
+// frame::sender_for_interpreter_frame
 frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
-  // sp is the raw sp from the sender after adapter or interpreter extension
-  intptr_t* sp = (intptr_t*) addr_at(sender_sp_offset);
+  // SP is the raw SP from the sender after adapter or interpreter
+  // extension.
+  intptr_t* sender_sp = this->sender_sp();
 
   // This is the sp before any possible extension (adapter/locals).
   intptr_t* unextended_sp = interpreter_frame_sender_sp();
 
+  // Stored FP.
+  intptr_t* saved_fp = link();
+
+  address sender_pc = this->sender_pc();
+  CodeBlob* sender_cb = CodeCache::find_blob_unsafe(sender_pc);
+  assert(sender_cb, "sanity");
+  nmethod* sender_nm = sender_cb->as_nmethod_or_null();
+
+  if (sender_nm != NULL) {
+    // If the sender PC is a deoptimization point, get the original
+    // PC.  For MethodHandle call site the unextended_sp is stored in
+    // saved_fp.
+    if (sender_nm->is_deopt_mh_entry(sender_pc)) {
+      DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, saved_fp));
+      unextended_sp = saved_fp;
+    }
+    else if (sender_nm->is_deopt_entry(sender_pc)) {
+      DEBUG_ONLY(verify_deopt_original_pc(sender_nm, unextended_sp));
+    }
+    else if (sender_nm->is_method_handle_return(sender_pc)) {
+      unextended_sp = saved_fp;
+    }
+  }
+
   // The interpreter and compiler(s) always save EBP/RBP in a known
   // location on entry. We must record where that location is
   // so that if EBP/RBP was live on callout from c2 we can find
@@ -351,29 +401,52 @@
     }
 #endif // AMD64
   }
-#endif /* COMPILER2 */
-  return frame(sp, unextended_sp, link(), sender_pc());
+#endif // COMPILER2
+
+  return frame(sender_sp, unextended_sp, saved_fp, sender_pc);
 }
 
 
-//------------------------------sender_for_compiled_frame-----------------------
+//------------------------------------------------------------------------------
+// frame::sender_for_compiled_frame
 frame frame::sender_for_compiled_frame(RegisterMap* map) const {
   assert(map != NULL, "map must be set");
-  const bool c1_compiled = _cb->is_compiled_by_c1();
 
   // frame owned by optimizing compiler
-  intptr_t* sender_sp = NULL;
-
   assert(_cb->frame_size() >= 0, "must have non-zero frame size");
-  sender_sp = unextended_sp() + _cb->frame_size();
+  intptr_t* sender_sp = unextended_sp() + _cb->frame_size();
+  intptr_t* unextended_sp = sender_sp;
 
   // On Intel the return_address is always the word on the stack
   address sender_pc = (address) *(sender_sp-1);
 
-  // This is the saved value of ebp which may or may not really be an fp.
-  // it is only an fp if the sender is an interpreter frame (or c1?)
+  // This is the saved value of EBP which may or may not really be an FP.
+  // It is only an FP if the sender is an interpreter frame (or C1?).
+  intptr_t* saved_fp = (intptr_t*) *(sender_sp - frame::sender_sp_offset);
+
+  // If we are returning to a compiled MethodHandle call site, the
+  // saved_fp will in fact be a saved value of the unextended SP.  The
+  // simplest way to tell whether we are returning to such a call site
+  // is as follows:
+  CodeBlob* sender_cb = CodeCache::find_blob_unsafe(sender_pc);
+  assert(sender_cb, "sanity");
+  nmethod* sender_nm = sender_cb->as_nmethod_or_null();
 
-  intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);
+  if (sender_nm != NULL) {
+    // If the sender PC is a deoptimization point, get the original
+    // PC.  For MethodHandle call site the unextended_sp is stored in
+    // saved_fp.
+    if (sender_nm->is_deopt_mh_entry(sender_pc)) {
+      DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, saved_fp));
+      unextended_sp = saved_fp;
+    }
+    else if (sender_nm->is_deopt_entry(sender_pc)) {
+      DEBUG_ONLY(verify_deopt_original_pc(sender_nm, unextended_sp));
+    }
+    else if (sender_nm->is_method_handle_return(sender_pc)) {
+      unextended_sp = saved_fp;
+    }
+  }
 
   if (map->update_map()) {
     // Tell GC to use argument oopmaps for some runtime stubs that need it.
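
[Editor's note: sketch with an invented PcKind enum. sender_for_interpreter_frame and sender_for_compiled_frame now share a three-way policy: at a MethodHandle deopt entry or a MethodHandle return, the saved FP slot actually holds the sender's unextended SP, while a plain deopt entry only triggers debug verification:]

    #include <cassert>

    enum PcKind { kNormal, kDeoptEntry, kDeoptMhEntry, kMhReturn };

    // Returns the unextended SP the reconstructed sender frame should use.
    long* sender_unextended_sp(PcKind kind, long* unextended_sp, long* saved_fp) {
      switch (kind) {
        case kDeoptMhEntry:   // deopt at a MethodHandle call site
        case kMhReturn:       // normal return to a MethodHandle call site
          return saved_fp;    // saved_fp really holds the unextended SP
        case kDeoptEntry:     // plain deopt: SP unchanged, PC fixed elsewhere
        case kNormal:
        default:
          return unextended_sp;
      }
    }

    int main() {
      long sp = 0, fp = 0;
      assert(sender_unextended_sp(kMhReturn, &sp, &fp) == &fp);
      assert(sender_unextended_sp(kNormal,   &sp, &fp) == &sp);
      return 0;
    }
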
@@ -383,7 +456,7 @@
     if (_cb->oop_maps() != NULL) {
       OopMapSet::update_register_map(this, map);
     }
-    // Since the prolog does the save and restore of epb there is no oopmap
+    // Since the prolog does the save and restore of EBP there is no oopmap
     // for it so we must fill in its location as if there was an oopmap entry
     // since if our caller was compiled code there could be live jvm state in it.
     map->set_location(rbp->as_VMReg(), (address) (sender_sp - frame::sender_sp_offset));
@@ -399,9 +472,12 @@
   }
 
   assert(sender_sp != sp(), "must have changed");
-  return frame(sender_sp, saved_fp, sender_pc);
+  return frame(sender_sp, unextended_sp, saved_fp, sender_pc);
 }
 
+
+//------------------------------------------------------------------------------
+// frame::sender
 frame frame::sender(RegisterMap* map) const {
   // Default is we don't have to follow them.  The sender_for_xxx will
   // update it accordingly.
--- a/src/cpu/x86/vm/frame_x86.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/frame_x86.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -163,6 +163,14 @@
     return (intptr_t*) addr_at(offset);
   }
 
+#if ASSERT
+  // Used in frame::sender_for_{interpreter,compiled}_frame
+  static void verify_deopt_original_pc(   nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return = false);
+  static void verify_deopt_mh_original_pc(nmethod* nm, intptr_t* unextended_sp) {
+    verify_deopt_original_pc(nm, unextended_sp, true);
+  }
+#endif
+
  public:
   // Constructors
 
--- a/src/cpu/x86/vm/frame_x86.inline.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/frame_x86.inline.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,32 +35,35 @@
   _deopt_state = unknown;
 }
 
-inline frame:: frame(intptr_t* sp, intptr_t* fp, address pc) {
+inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
   _sp = sp;
   _unextended_sp = sp;
   _fp = fp;
   _pc = pc;
   assert(pc != NULL, "no pc?");
   _cb = CodeCache::find_blob(pc);
-  _deopt_state = not_deoptimized;
-  if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
-    _pc = (((nmethod*)_cb)->get_original_pc(this));
+
+  address original_pc = nmethod::get_deopt_original_pc(this);
+  if (original_pc != NULL) {
+    _pc = original_pc;
     _deopt_state = is_deoptimized;
   } else {
     _deopt_state = not_deoptimized;
   }
 }
 
-inline frame:: frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
+inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
   _sp = sp;
   _unextended_sp = unextended_sp;
   _fp = fp;
   _pc = pc;
   assert(pc != NULL, "no pc?");
   _cb = CodeCache::find_blob(pc);
-  _deopt_state = not_deoptimized;
-  if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
-    _pc = (((nmethod*)_cb)->get_original_pc(this));
+
+  address original_pc = nmethod::get_deopt_original_pc(this);
+  if (original_pc != NULL) {
+    _pc = original_pc;
+    assert(((nmethod*)_cb)->code_contains(_pc), "original PC must be in nmethod");
     _deopt_state = is_deoptimized;
   } else {
     _deopt_state = not_deoptimized;
@@ -86,9 +89,9 @@
 
   _cb = CodeCache::find_blob(_pc);
 
-  _deopt_state = not_deoptimized;
-  if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
-    _pc = (((nmethod*)_cb)->get_original_pc(this));
+  address original_pc = nmethod::get_deopt_original_pc(this);
+  if (original_pc != NULL) {
+    _pc = original_pc;
     _deopt_state = is_deoptimized;
   } else {
     _deopt_state = not_deoptimized;
@@ -225,11 +228,13 @@
 // top of expression stack
 inline intptr_t* frame::interpreter_frame_tos_address() const {
   intptr_t* last_sp = interpreter_frame_last_sp();
-  if (last_sp == NULL ) {
+  if (last_sp == NULL) {
     return sp();
   } else {
-    // sp() may have been extended by an adapter
-    assert(last_sp < fp() && last_sp >= sp(), "bad tos");
+    // sp() may have been extended or shrunk by an adapter.  At least
+    // check that we don't fall behind the legal region.
+    // For top deoptimized frame last_sp == interpreter_frame_monitor_end.
+    assert(last_sp <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
     return last_sp;
   }
 }
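
[Editor's note: miniature model with placeholder types and values. The constructor hunks in this file replace the repeated is_nmethod()/is_deopt_pc() test with one helper, nmethod::get_deopt_original_pc(), whose return value doubles as the test: NULL means the PC is not a deoptimization PC:]

    #include <cstdio>

    // Placeholder helper: returns the original PC when `pc` is a
    // deoptimization PC, NULL otherwise.
    const char* get_deopt_original_pc(const char* pc,
                                      const char* deopt_pc,
                                      const char* original) {
      return (pc == deopt_pc) ? original : nullptr;
    }

    int main() {
      const char* deopt = "DEOPT", *orig = "ORIG";
      const char* pc = deopt;
      if (const char* o = get_deopt_original_pc(pc, deopt, orig)) {
        pc = o;                              // _pc = original_pc
        std::printf("deoptimized, pc -> %s\n", pc);
      } else {
        std::printf("not deoptimized\n");
      }
      return 0;
    }
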
--- a/src/cpu/x86/vm/globals_x86.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/globals_x86.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -22,17 +22,16 @@
  *
  */
 
-//
 // Sets the default values for platform dependent flags used by the runtime system.
 // (see globals.hpp)
-//
 
-define_pd_global(bool,  ConvertSleepToYield,      true);
-define_pd_global(bool,  ShareVtableStubs,         true);
-define_pd_global(bool,  CountInterpCalls,         true);
+define_pd_global(bool, ConvertSleepToYield,      true);
+define_pd_global(bool, ShareVtableStubs,         true);
+define_pd_global(bool, CountInterpCalls,         true);
+define_pd_global(bool, NeedsDeoptSuspend,        false); // only register window machines need this
 
-define_pd_global(bool, ImplicitNullChecks,          true);  // Generate code for implicit null checks
-define_pd_global(bool, UncommonNullCast,            true);  // Uncommon-trap NULLs past to check cast
+define_pd_global(bool, ImplicitNullChecks,       true);  // Generate code for implicit null checks
+define_pd_global(bool, UncommonNullCast,         true);  // Uncommon-trap NULLs passed to check cast
 
 // See 4827828 for this change. There is no globals_core_i486.hpp. I can't
 // assign a different value for C2 without touching a number of files. Use
@@ -42,29 +41,24 @@
 // the uep and the vep don't get real alignment but just slop on by,
 // only assured that the entry instruction meets the 5-byte size requirement.
 #ifdef COMPILER2
-define_pd_global(intx,  CodeEntryAlignment,       32);
+define_pd_global(intx, CodeEntryAlignment,       32);
 #else
-define_pd_global(intx,  CodeEntryAlignment,       16);
+define_pd_global(intx, CodeEntryAlignment,       16);
 #endif // COMPILER2
+define_pd_global(intx, InlineFrequencyCount,     100);
+define_pd_global(intx, InlineSmallCode,          1000);
 
-define_pd_global(bool, NeedsDeoptSuspend,           false); // only register window machines need this
-
-define_pd_global(uintx, TLABSize,                 0);
+define_pd_global(intx, StackYellowPages, 2);
+define_pd_global(intx, StackRedPages, 1);
 #ifdef AMD64
-define_pd_global(uintx, NewSize, ScaleForWordSize(2048 * K));
 // Very large C++ stack frames using solaris-amd64 optimized builds
 // due to lack of optimization caused by C++ compiler bugs
 define_pd_global(intx, StackShadowPages, SOLARIS_ONLY(20) NOT_SOLARIS(6) DEBUG_ONLY(+2));
 #else
-define_pd_global(uintx, NewSize,                  1024 * K);
 define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+1));
 #endif // AMD64
-define_pd_global(intx,  InlineFrequencyCount,     100);
-define_pd_global(intx,  InlineSmallCode,          1000);
-define_pd_global(intx,  PreInflateSpin,           10);
 
-define_pd_global(intx, StackYellowPages, 2);
-define_pd_global(intx, StackRedPages, 1);
+define_pd_global(intx, PreInflateSpin,           10);
 
 define_pd_global(bool, RewriteBytecodes,     true);
 define_pd_global(bool, RewriteFrequentPairs, true);
--- a/src/cpu/x86/vm/interp_masm_x86_32.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -196,6 +196,9 @@
   } else {
     assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
     movl(reg, Address(rsi, bcp_offset));
+    // Check if the secondary index definition is still ~x, otherwise
+    // we have to change the following assembler code to calculate the
+    // plain index.
     assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
     notl(reg);  // convert to plain index
   }
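
[Editor's note: the assert above pins down the encoding contract that notl(reg) relies on. A secondary (invokedynamic) constant pool cache index is stored as the bitwise complement of the plain index, so decoding is a single NOT. A standalone check:]

    #include <cassert>

    int encode_secondary_index(int i) { return ~i; }
    int decode_secondary_index(int i) { return ~i; }

    int main() {
      assert(decode_secondary_index(encode_secondary_index(123)) == 123);
      assert(decode_secondary_index(~123) == 123);  // mirrors the assert above
      return 0;
    }
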
@@ -1236,17 +1239,19 @@
     // If no method data exists, go to profile_continue.
     test_method_data_pointer(mdp, profile_continue);
 
-    // We are making a call.  Increment the count.
-    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
-
     Label skip_receiver_profile;
     if (receiver_can_be_null) {
+      Label not_null;
       testptr(receiver, receiver);
-      jcc(Assembler::zero, skip_receiver_profile);
+      jccb(Assembler::notZero, not_null);
+      // We are making a call.  Increment the count for null receiver.
+      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+      jmp(skip_receiver_profile);
+      bind(not_null);
     }
 
     // Record the receiver type.
-    record_klass_in_profile(receiver, mdp, reg2);
+    record_klass_in_profile(receiver, mdp, reg2, true);
     bind(skip_receiver_profile);
 
     // The method data pointer needs to be updated to reflect the new target.
@@ -1260,10 +1265,14 @@
 
 void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                         Register receiver, Register mdp,
-                                        Register reg2,
-                                        int start_row, Label& done) {
-  if (TypeProfileWidth == 0)
+                                        Register reg2, int start_row,
+                                        Label& done, bool is_virtual_call) {
+  if (TypeProfileWidth == 0) {
+    if (is_virtual_call) {
+      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+    }
     return;
+  }
 
   int last_row = VirtualCallData::row_limit() - 1;
   assert(start_row <= last_row, "must be work left to do");
@@ -1291,19 +1300,28 @@
     bind(next_test);
 
     if (row == start_row) {
+      Label found_null;
       // Failed the equality check on receiver[n]...  Test for null.
       testptr(reg2, reg2);
       if (start_row == last_row) {
         // The only thing left to do is handle the null case.
-        jcc(Assembler::notZero, done);
+        if (is_virtual_call) {
+          jccb(Assembler::zero, found_null);
+          // Receiver did not match any saved receiver and there is no empty row for it.
+          // Increment total counter to indicate polymorphic case.
+          increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+          jmp(done);
+          bind(found_null);
+        } else {
+          jcc(Assembler::notZero, done);
+        }
         break;
       }
       // Since null is rare, make it be the branch-taken case.
-      Label found_null;
       jcc(Assembler::zero, found_null);
 
       // Put all the "Case 3" tests here.
-      record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done);
+      record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done, is_virtual_call);
 
       // Found a null.  Keep searching for a matching receiver,
       // but remember that this is an empty (unused) slot.
@@ -1320,16 +1338,18 @@
   int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
   movptr(reg2, (int32_t)DataLayout::counter_increment);
   set_mdp_data_at(mdp, count_offset, reg2);
-  jmp(done);
+  if (start_row > 0) {
+    jmp(done);
+  }
 }
 
 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
-                                                        Register mdp,
-                                                        Register reg2) {
+                                                        Register mdp, Register reg2,
+                                                        bool is_virtual_call) {
   assert(ProfileInterpreter, "must be profiling");
   Label done;
 
-  record_klass_in_profile_helper(receiver, mdp, reg2, 0, done);
+  record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);
 
   bind (done);
 }
@@ -1422,7 +1442,7 @@
       mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
 
       // Record the object type.
-      record_klass_in_profile(klass, mdp, reg2);
+      record_klass_in_profile(klass, mdp, reg2, false);
       assert(reg2 == rdi, "we know how to fix this blown reg");
       restore_locals();         // Restore EDI
     }
--- a/src/cpu/x86/vm/interp_masm_x86_32.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/interp_masm_x86_32.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -213,10 +213,10 @@
                         Label& not_equal_continue);
 
   void record_klass_in_profile(Register receiver, Register mdp,
-                               Register reg2);
+                               Register reg2, bool is_virtual_call);
   void record_klass_in_profile_helper(Register receiver, Register mdp,
-                                      Register reg2,
-                                      int start_row, Label& done);
+                                      Register reg2, int start_row,
+                                      Label& done, bool is_virtual_call);
 
   void update_mdp_by_offset(Register mdp_in, int offset_of_offset);
   void update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp);
--- a/src/cpu/x86/vm/interp_masm_x86_64.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -185,12 +185,30 @@
 }
 
 
+void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
+                                                       int bcp_offset,
+                                                       bool giant_index) {
+  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
+  if (!giant_index) {
+    load_unsigned_short(index, Address(r13, bcp_offset));
+  } else {
+    assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
+    movl(index, Address(r13, bcp_offset));
+    // Check if the secondary index definition is still ~x, otherwise
+    // we have to change the following assembler code to calculate the
+    // plain index.
+    assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
+    notl(index);  // convert to plain index
+  }
+}
+
+
 void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
                                                            Register index,
-                                                           int bcp_offset) {
-  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
+                                                           int bcp_offset,
+                                                           bool giant_index) {
   assert(cache != index, "must use different registers");
-  load_unsigned_short(index, Address(r13, bcp_offset));
+  get_cache_index_at_bcp(index, bcp_offset, giant_index);
   movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
   assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
   // convert from field index to ConstantPoolCacheEntry index
@@ -200,10 +218,10 @@
 
 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
                                                                Register tmp,
-                                                               int bcp_offset) {
-  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
+                                                               int bcp_offset,
+                                                               bool giant_index) {
   assert(cache != tmp, "must use different register");
-  load_unsigned_short(tmp, Address(r13, bcp_offset));
+  get_cache_index_at_bcp(tmp, bcp_offset, giant_index);
   assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
   // convert from field index to ConstantPoolCacheEntry index
   // and from word offset to byte offset
@@ -1236,18 +1254,28 @@
 
 void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
                                                      Register mdp,
-                                                     Register reg2) {
+                                                     Register reg2,
+                                                     bool receiver_can_be_null) {
   if (ProfileInterpreter) {
     Label profile_continue;
 
     // If no method data exists, go to profile_continue.
     test_method_data_pointer(mdp, profile_continue);
 
-    // We are making a call.  Increment the count.
-    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+    Label skip_receiver_profile;
+    if (receiver_can_be_null) {
+      Label not_null;
+      testptr(receiver, receiver);
+      jccb(Assembler::notZero, not_null);
+      // We are making a call.  Increment the count for null receiver.
+      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+      jmp(skip_receiver_profile);
+      bind(not_null);
+    }
 
     // Record the receiver type.
-    record_klass_in_profile(receiver, mdp, reg2);
+    record_klass_in_profile(receiver, mdp, reg2, true);
+    bind(skip_receiver_profile);
 
     // The method data pointer needs to be updated to reflect the new target.
     update_mdp_by_constant(mdp,
@@ -1270,10 +1298,14 @@
 // See below for example code.
 void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                         Register receiver, Register mdp,
-                                        Register reg2,
-                                        int start_row, Label& done) {
-  if (TypeProfileWidth == 0)
+                                        Register reg2, int start_row,
+                                        Label& done, bool is_virtual_call) {
+  if (TypeProfileWidth == 0) {
+    if (is_virtual_call) {
+      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+    }
     return;
+  }
 
   int last_row = VirtualCallData::row_limit() - 1;
   assert(start_row <= last_row, "must be work left to do");
@@ -1301,19 +1333,28 @@
     bind(next_test);
 
     if (test_for_null_also) {
+      Label found_null;
       // Failed the equality check on receiver[n]...  Test for null.
       testptr(reg2, reg2);
       if (start_row == last_row) {
         // The only thing left to do is handle the null case.
-        jcc(Assembler::notZero, done);
+        if (is_virtual_call) {
+          jccb(Assembler::zero, found_null);
+          // Receiver did not match any saved receiver and there is no empty row for it.
+          // Increment total counter to indicate polymorphic case.
+          increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+          jmp(done);
+          bind(found_null);
+        } else {
+          jcc(Assembler::notZero, done);
+        }
         break;
       }
       // Since null is rare, make it be the branch-taken case.
-      Label found_null;
       jcc(Assembler::zero, found_null);
 
       // Put all the "Case 3" tests here.
-      record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done);
+      record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done, is_virtual_call);
 
       // Found a null.  Keep searching for a matching receiver,
       // but remember that this is an empty (unused) slot.
@@ -1330,7 +1371,9 @@
   int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
   movl(reg2, DataLayout::counter_increment);
   set_mdp_data_at(mdp, count_offset, reg2);
-  jmp(done);
+  if (start_row > 0) {
+    jmp(done);
+  }
 }
 
 // Example state machine code for three profile rows:
@@ -1342,7 +1385,7 @@
 //     if (row[1].rec != NULL) {
 //       // degenerate decision tree, rooted at row[2]
 //       if (row[2].rec == rec) { row[2].incr(); goto done; }
-//       if (row[2].rec != NULL) { goto done; } // overflow
+//       if (row[2].rec != NULL) { count.incr(); goto done; } // overflow
 //       row[2].init(rec); goto done;
 //     } else {
 //       // remember row[1] is empty
@@ -1355,14 +1398,15 @@
 //     if (row[2].rec == rec) { row[2].incr(); goto done; }
 //     row[0].init(rec); goto done;
 //   }
+//   done:
 
 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
-                                                        Register mdp,
-                                                        Register reg2) {
+                                                        Register mdp, Register reg2,
+                                                        bool is_virtual_call) {
   assert(ProfileInterpreter, "must be profiling");
   Label done;
 
-  record_klass_in_profile_helper(receiver, mdp, reg2, 0, done);
+  record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);
 
   bind (done);
 }
@@ -1458,7 +1502,7 @@
       mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
 
       // Record the object type.
-      record_klass_in_profile(klass, mdp, reg2);
+      record_klass_in_profile(klass, mdp, reg2, false);
     }
     update_mdp_by_constant(mdp, mdp_delta);
 
--- a/src/cpu/x86/vm/interp_masm_x86_64.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/interp_masm_x86_64.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -95,9 +95,10 @@
 
   void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
   void get_cache_and_index_at_bcp(Register cache, Register index,
-                                  int bcp_offset);
+                                  int bcp_offset, bool giant_index = false);
   void get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
-                                      int bcp_offset);
+                                      int bcp_offset, bool giant_index = false);
+  void get_cache_index_at_bcp(Register index, int bcp_offset, bool giant_index = false);
 
 
   void pop_ptr(Register r = rax);
@@ -221,10 +222,10 @@
                         Label& not_equal_continue);
 
   void record_klass_in_profile(Register receiver, Register mdp,
-                               Register reg2);
+                               Register reg2, bool is_virtual_call);
   void record_klass_in_profile_helper(Register receiver, Register mdp,
-                                      Register reg2,
-                                      int start_row, Label& done);
+                                      Register reg2, int start_row,
+                                      Label& done, bool is_virtual_call);
 
   void update_mdp_by_offset(Register mdp_in, int offset_of_offset);
   void update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp);
@@ -236,7 +237,8 @@
   void profile_call(Register mdp);
   void profile_final_call(Register mdp);
   void profile_virtual_call(Register receiver, Register mdp,
-                            Register scratch2);
+                            Register scratch2,
+                            bool receiver_can_be_null = false);
   void profile_ret(Register return_bci, Register mdp);
   void profile_null_seen(Register mdp);
   void profile_typecheck(Register mdp, Register klass, Register scratch);
--- a/src/cpu/x86/vm/interpreter_x86_64.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/interpreter_x86_64.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -277,12 +277,11 @@
   address entry_point = __ pc();
 
   // abstract method entry
-  // remove return address. Not really needed, since exception
-  // handling throws away expression stack
-  __ pop(rbx);
 
-  // adjust stack to what a normal return would do
-  __ mov(rsp, r13);
+  // pop return address, reset last_sp to NULL
+  __ empty_expression_stack();
+  __ restore_bcp();      // r13 must be correct for exception handler  (was destroyed)
+  __ restore_locals();   // make sure locals pointer (r14) is correct as well (was destroyed)
 
   // throw exception
   __ call_VM(noreg, CAST_FROM_FN_PTR(address,
@@ -300,7 +299,10 @@
   if (!EnableMethodHandles) {
     return generate_abstract_entry();
   }
-  return generate_abstract_entry(); //6815692//
+
+  address entry_point = MethodHandles::generate_method_handle_interpreter_entry(_masm);
+
+  return entry_point;
 }
 
 
--- a/src/cpu/x86/vm/methodHandles_x86.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/methodHandles_x86.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -65,9 +65,9 @@
   // Verify that argslot lies within (rsp, rbp].
   Label L_ok, L_bad;
   __ cmpptr(rax_argslot, rbp);
-  __ jcc(Assembler::above, L_bad);
+  __ jccb(Assembler::above, L_bad);
   __ cmpptr(rsp, rax_argslot);
-  __ jcc(Assembler::below, L_ok);
+  __ jccb(Assembler::below, L_ok);
   __ bind(L_bad);
   __ stop(error_message);
   __ bind(L_ok);
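
[Editor's note: illustration only. The jcc to jccb rewrites throughout this file select the two-byte short-jump encoding, which carries a signed 8-bit displacement, for branches whose targets are provably close, such as these small assert blocks. The range check an assembler conceptually performs before choosing the short form:]

    #include <cstdio>

    // A short branch (jccb) encodes a signed 8-bit displacement measured
    // from the end of the 2-byte instruction.
    bool fits_rel8(long branch_end, long target) {
      long disp = target - branch_end;
      return disp >= -128 && disp <= 127;
    }

    int main() {
      std::printf("%d %d\n", fits_rel8(100, 150), fits_rel8(100, 10000));  // 1 0
      return 0;
    }
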
@@ -136,9 +136,9 @@
   if (arg_slots.is_register()) {
     Label L_ok, L_bad;
     __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
-    __ jcc(Assembler::greater, L_bad);
+    __ jccb(Assembler::greater, L_bad);
     __ testl(arg_slots.as_register(), -stack_move_unit() - 1);
-    __ jcc(Assembler::zero, L_ok);
+    __ jccb(Assembler::zero, L_ok);
     __ bind(L_bad);
     __ stop("assert arg_slots <= 0 and clear low bits");
     __ bind(L_ok);
@@ -173,7 +173,7 @@
     __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
     __ addptr(rdx_temp, wordSize);
     __ cmpptr(rdx_temp, rax_argslot);
-    __ jcc(Assembler::less, loop);
+    __ jccb(Assembler::less, loop);
   }
 
   // Now move the argslot down, to point to the opened-up space.
@@ -211,9 +211,9 @@
     Label L_ok, L_bad;
     __ lea(rbx_temp, Address(rax_argslot, arg_slots, Address::times_ptr));
     __ cmpptr(rbx_temp, rbp);
-    __ jcc(Assembler::above, L_bad);
+    __ jccb(Assembler::above, L_bad);
     __ cmpptr(rsp, rax_argslot);
-    __ jcc(Assembler::below, L_ok);
+    __ jccb(Assembler::below, L_ok);
     __ bind(L_bad);
     __ stop("deleted argument(s) must fall within current frame");
     __ bind(L_ok);
@@ -221,9 +221,9 @@
   if (arg_slots.is_register()) {
     Label L_ok, L_bad;
     __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
-    __ jcc(Assembler::less, L_bad);
+    __ jccb(Assembler::less, L_bad);
     __ testl(arg_slots.as_register(), -stack_move_unit() - 1);
-    __ jcc(Assembler::zero, L_ok);
+    __ jccb(Assembler::zero, L_ok);
     __ bind(L_bad);
     __ stop("assert arg_slots >= 0 and clear low bits");
     __ bind(L_ok);
@@ -258,7 +258,7 @@
     __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
     __ addptr(rdx_temp, -wordSize);
     __ cmpptr(rdx_temp, rsp);
-    __ jcc(Assembler::greaterEqual, loop);
+    __ jccb(Assembler::greaterEqual, loop);
   }
 
   // Now move the argslot up, to point to the just-copied block.
@@ -268,8 +268,9 @@
 }
 
 #ifndef PRODUCT
+extern "C" void print_method_handle(oop mh);
 void trace_method_handle_stub(const char* adaptername,
-                              oopDesc* mh,
+                              oop mh,
                               intptr_t* entry_sp,
                               intptr_t* saved_sp,
                               intptr_t* saved_bp) {
@@ -280,6 +281,7 @@
          adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
   if (last_sp != saved_sp)
     printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp);
+  if (Verbose)  print_method_handle(mh);
 }
 #endif //PRODUCT
 
@@ -382,11 +384,11 @@
       // FIXME: fill in _raise_exception_method with a suitable sun.dyn method
       __ movptr(rbx_method, ExternalAddress((address) &_raise_exception_method));
       __ testptr(rbx_method, rbx_method);
-      __ jcc(Assembler::zero, no_method);
+      __ jccb(Assembler::zero, no_method);
       int jobject_oop_offset = 0;
       __ movptr(rbx_method, Address(rbx_method, jobject_oop_offset));  // dereference the jobject
       __ testptr(rbx_method, rbx_method);
-      __ jcc(Assembler::zero, no_method);
+      __ jccb(Assembler::zero, no_method);
       __ verify_oop(rbx_method);
       __ push(rdi_pc);          // and restore caller PC
       __ jmp(rbx_method_fie);
@@ -448,7 +450,7 @@
                                 rbx_index, Address::times_ptr,
                                 base + vtableEntry::method_offset_in_bytes());
       Register rbx_method = rbx_temp;
-      __ movl(rbx_method, vtable_entry_addr);
+      __ movptr(rbx_method, vtable_entry_addr);
 
       __ verify_oop(rbx_method);
       __ jmp(rbx_method_fie);
@@ -533,16 +535,15 @@
       if (arg_type == T_OBJECT) {
         __ movptr(Address(rax_argslot, 0), rbx_temp);
       } else {
-        __ load_sized_value(rbx_temp, prim_value_addr,
+        __ load_sized_value(rdx_temp, prim_value_addr,
                             type2aelembytes(arg_type), is_signed_subword_type(arg_type));
-        __ movptr(Address(rax_argslot, 0), rbx_temp);
+        __ movptr(Address(rax_argslot, 0), rdx_temp);
 #ifndef _LP64
         if (arg_slots == 2) {
-          __ movl(rbx_temp, prim_value_addr.plus_disp(wordSize));
-          __ movl(Address(rax_argslot, Interpreter::stackElementSize()), rbx_temp);
+          __ movl(rdx_temp, prim_value_addr.plus_disp(wordSize));
+          __ movl(Address(rax_argslot, Interpreter::stackElementSize()), rdx_temp);
         }
 #endif //_LP64
-        break;
       }
 
       if (direct_to_method) {
@@ -584,7 +585,7 @@
       Label done;
       __ movptr(rdx_temp, vmarg);
       __ testl(rdx_temp, rdx_temp);
-      __ jcc(Assembler::zero, done);          // no cast if null
+      __ jccb(Assembler::zero, done);         // no cast if null
       __ load_klass(rdx_temp, rdx_temp);
 
       // live at this point:
@@ -675,24 +676,24 @@
       // (now we are done with the old MH)
 
       // original 32-bit vmdata word must be of this form:
-      //    | MBZ:16 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
-      __ xchgl(rcx, rbx_vminfo);                // free rcx for shifts
+      //    | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
+      __ xchgptr(rcx, rbx_vminfo);                // free rcx for shifts
       __ shll(rdx_temp /*, rcx*/);
       Label zero_extend, done;
       __ testl(rcx, CONV_VMINFO_SIGN_FLAG);
-      __ jcc(Assembler::zero, zero_extend);
+      __ jccb(Assembler::zero, zero_extend);
 
       // this path is taken for int->byte, int->short
       __ sarl(rdx_temp /*, rcx*/);
-      __ jmp(done);
+      __ jmpb(done);
 
       __ bind(zero_extend);
       // this is taken for int->char
       __ shrl(rdx_temp /*, rcx*/);
 
       __ bind(done);
-      __ movptr(vmarg, rdx_temp);
-      __ xchgl(rcx, rbx_vminfo);                // restore rcx_recv
+      __ movl(vmarg, rdx_temp);
+      __ xchgptr(rcx, rbx_vminfo);                // restore rcx_recv
 
       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
     }
@@ -861,7 +862,7 @@
             // Verify that argslot > destslot, by at least swap_bytes.
             Label L_ok;
             __ cmpptr(rax_argslot, rbx_destslot);
-            __ jcc(Assembler::aboveEqual, L_ok);
+            __ jccb(Assembler::aboveEqual, L_ok);
             __ stop("source must be above destination (upward rotation)");
             __ bind(L_ok);
           }
@@ -877,7 +878,7 @@
           __ movptr(Address(rax_argslot, swap_bytes), rdx_temp);
           __ addptr(rax_argslot, -wordSize);
           __ cmpptr(rax_argslot, rbx_destslot);
-          __ jcc(Assembler::aboveEqual, loop);
+          __ jccb(Assembler::aboveEqual, loop);
         } else {
           __ addptr(rax_argslot, swap_bytes);
 #ifdef ASSERT
@@ -885,7 +886,7 @@
             // Verify that argslot < destslot, by at least swap_bytes.
             Label L_ok;
             __ cmpptr(rax_argslot, rbx_destslot);
-            __ jcc(Assembler::belowEqual, L_ok);
+            __ jccb(Assembler::belowEqual, L_ok);
             __ stop("source must be below destination (downward rotation)");
             __ bind(L_ok);
           }
@@ -901,7 +902,7 @@
           __ movptr(Address(rax_argslot, -swap_bytes), rdx_temp);
           __ addptr(rax_argslot, wordSize);
           __ cmpptr(rax_argslot, rbx_destslot);
-          __ jcc(Assembler::belowEqual, loop);
+          __ jccb(Assembler::belowEqual, loop);
         }
 
         // pop the original first chunk into the destination slot, now free
@@ -967,7 +968,7 @@
       __ addptr(rax_argslot, wordSize);
       __ addptr(rdx_newarg, wordSize);
       __ cmpptr(rdx_newarg, rbx_oldarg);
-      __ jcc(Assembler::less, loop);
+      __ jccb(Assembler::less, loop);
 
       __ pop(rdi);              // restore temp
 
@@ -1119,7 +1120,7 @@
         }
         __ addptr(rax_argslot, Interpreter::stackElementSize());
         __ cmpptr(rax_argslot, rdx_argslot_limit);
-        __ jcc(Assembler::less, loop);
+        __ jccb(Assembler::less, loop);
       } else if (length_constant == 0) {
         __ bind(skip_array_check);
         // nothing to copy
--- a/src/cpu/x86/vm/runtime_x86_32.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/runtime_x86_32.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -43,11 +43,11 @@
 // This code is entered with a jmp.
 //
 // Arguments:
-//   rax,: exception oop
+//   rax: exception oop
 //   rdx: exception pc
 //
 // Results:
-//   rax,: exception oop
+//   rax: exception oop
 //   rdx: exception pc in caller or ???
 //   destination: exception handler of caller
 //
@@ -113,17 +113,17 @@
   __ addptr(rsp, return_off * wordSize);   // Epilog!
   __ pop(rdx); // Exception pc
 
+  // rax: exception handler for given <exception oop/exception pc>
 
-  // rax,: exception handler for given <exception oop/exception pc>
+  // Restore SP from BP if the exception PC is a MethodHandle call.
+  __ cmpl(Address(rcx, JavaThread::is_method_handle_exception_offset()), 0);
+  __ cmovptr(Assembler::notEqual, rsp, rbp);
 
   // We have a handler in rax, (could be deopt blob)
   // rdx - throwing pc, deopt blob will need it.
 
   __ push(rax);
 
-  // rcx contains handler address
-
-  __ get_thread(rcx);           // TLS
   // Get the exception
   __ movptr(rax, Address(rcx, JavaThread::exception_oop_offset()));
   // Get the exception pc in case we are deoptimized
@@ -137,7 +137,7 @@
 
   __ pop(rcx);
 
-  // rax,: exception oop
+  // rax: exception oop
   // rcx: exception handler
   // rdx: exception pc
   __ jmp (rcx);
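
[Editor's note: C-level equivalent with an invented wrapper. The new cmpl/cmovptr pair above (mirrored for 64-bit later in this changeset) restores RSP from RBP without a branch whenever the thread flag marks the exception PC as a MethodHandle call:]

    #include <cstdint>
    #include <cstdio>

    uint64_t select_sp(int is_mh_flag, uint64_t rsp, uint64_t rbp) {
      // cmpl(is_method_handle_exception_offset, 0); cmovptr(notEqual, rsp, rbp);
      return (is_mh_flag != 0) ? rbp : rsp;
    }

    int main() {
      std::printf("%#llx\n",
                  (unsigned long long)select_sp(1, 0x1000, 0x2000));  // 0x2000
      return 0;
    }
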
--- a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -907,7 +907,8 @@
                                                             int total_args_passed,
                                                             int comp_args_on_stack,
                                                             const BasicType *sig_bt,
-                                                            const VMRegPair *regs) {
+                                                            const VMRegPair *regs,
+                                                            AdapterFingerPrint* fingerprint) {
   address i2c_entry = __ pc();
 
   gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
@@ -954,7 +955,7 @@
   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
 
   __ flush();
-  return new AdapterHandlerEntry(i2c_entry, c2i_entry, c2i_unverified_entry);
+  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
 }
 
 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
--- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -638,6 +638,10 @@
 
   __ movptr(rax, Address(rsp, 0));
 
+  // Must preserve original SP for loading incoming arguments because
+  // we need to align the outgoing SP for compiled code.
+  __ movptr(r11, rsp);
+
   // Cut-out for having no stack args.  Since up to 2 int/oop args are passed
   // in registers, we will occasionally have no stack args.
   int comp_words_on_stack = 0;
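
[Editor's note: toy model; the SP-alignment step itself is assumed here, it is not shown in this hunk. The i2c adapter captures the incoming interpreter SP in r11 before RSP is realigned for compiled code, and later argument loads are based on that saved copy rather than on r13:]

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t rsp = 0x7fff0008;           // incoming (interpreter) SP
      uint64_t saved_sp = rsp;             // movptr(r11, rsp)
      rsp &= ~uint64_t(15);                // align outgoing SP (step assumed)

      // Arguments are still addressed off saved_sp, not the realigned rsp.
      uint64_t ld_off = 2 * 8;             // hypothetical slot offset
      std::printf("rsp=%#llx, arg slot at %#llx\n",
                  (unsigned long long)rsp,
                  (unsigned long long)(saved_sp + ld_off));
      return 0;
    }
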
@@ -661,6 +665,10 @@
   // as far as the placement of the call instruction
   __ push(rax);
 
+  // Put saved SP in another register
+  const Register saved_sp = rax;
+  __ movptr(saved_sp, r11);
+
   // Will jump to the compiled code just as if compiled code was doing it.
   // Pre-load the register-jump target early, to schedule it better.
   __ movptr(r11, Address(rbx, in_bytes(methodOopDesc::from_compiled_offset())));
@@ -680,11 +688,7 @@
     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
             "scrambled load targets?");
     // Load in argument order going down.
-    // int ld_off = (total_args_passed + comp_words_on_stack -i)*wordSize;
-    // base ld_off on r13 (sender_sp) as the stack alignment makes offsets from rsp
-    // unpredictable
-    int ld_off = ((total_args_passed - 1) - i)*Interpreter::stackElementSize();
-
+    int ld_off = (total_args_passed - i)*Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
     // Point to interpreter value (vs. tag)
     int next_off = ld_off - Interpreter::stackElementSize();
     //
@@ -699,10 +703,14 @@
     if (r_1->is_stack()) {
       // Convert stack slot to an SP offset (+ wordSize to account for return address )
       int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
+
+      // We can use r13 as a temp here because compiled code doesn't need r13 as an input.
+      // If we end up going through a c2i adapter because of a miss, a reasonable value of
+      // r13 will be generated.
       if (!r_2->is_valid()) {
         // sign extend???
-        __ movl(rax, Address(r13, ld_off));
-        __ movptr(Address(rsp, st_off), rax);
+        __ movl(r13, Address(saved_sp, ld_off));
+        __ movptr(Address(rsp, st_off), r13);
       } else {
         //
         // We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
@@ -715,9 +723,9 @@
         // ld_off is MSW so get LSW
         const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                            next_off : ld_off;
-        __ movq(rax, Address(r13, offset));
+        __ movq(r13, Address(saved_sp, offset));
         // st_off is LSW (i.e. reg.first())
-        __ movq(Address(rsp, st_off), rax);
+        __ movq(Address(rsp, st_off), r13);
       }
     } else if (r_1->is_Register()) {  // Register argument
       Register r = r_1->as_Register();
@@ -732,16 +740,16 @@
                            next_off : ld_off;
 
         // this can be a misaligned move
-        __ movq(r, Address(r13, offset));
+        __ movq(r, Address(saved_sp, offset));
       } else {
         // sign extend and use a full word?
-        __ movl(r, Address(r13, ld_off));
+        __ movl(r, Address(saved_sp, ld_off));
       }
     } else {
       if (!r_2->is_valid()) {
-        __ movflt(r_1->as_XMMRegister(), Address(r13, ld_off));
+        __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
       } else {
-        __ movdbl(r_1->as_XMMRegister(), Address(r13, next_off));
+        __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
       }
     }
   }
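
For readers tracing the new offset computation, here is a standalone sketch of the arithmetic the loop performs; the element size and value offset are assumptions (8 and 0, i.e. an untagged 64-bit interpreter stack), not values confirmed by this patch:

    #include <cstdio>

    int main() {
      const int stack_element_size = 8;  // assumed: one 8-byte slot per value
      const int value_offset       = 0;  // assumed: value at the start of its slot
      const int total_args_passed  = 3;  // example signature with three arguments
      for (int i = 0; i < total_args_passed; i++) {
        // Mirrors: ld_off = (total_args_passed - i)*stackElementSize() + value_offset
        int ld_off   = (total_args_passed - i) * stack_element_size + value_offset;
        int next_off = ld_off - stack_element_size;  // LSW slot of a two-slot value
        std::printf("arg %d: ld_off=%d next_off=%d\n", i, ld_off, next_off);
      }
      return 0;
    }
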
@@ -770,7 +778,8 @@
                                                             int total_args_passed,
                                                             int comp_args_on_stack,
                                                             const BasicType *sig_bt,
-                                                            const VMRegPair *regs) {
+                                                            const VMRegPair *regs,
+                                                            AdapterFingerPrint* fingerprint) {
   address i2c_entry = __ pc();
 
   gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
@@ -816,7 +825,7 @@
   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
 
   __ flush();
-  return new AdapterHandlerEntry(i2c_entry, c2i_entry, c2i_unverified_entry);
+  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
 }
 
 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
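
The new AdapterFingerPrint parameter lets AdapterHandlerLibrary::new_entry key adapters by calling shape, so methods with identical signatures share one generated adapter. A hedged sketch of that caching idea (the map, key, and entry types are illustrative, not the VM's real data structures):

    #include <map>
    #include <string>

    struct AdapterEntry {
      void* i2c_entry;
      void* c2i_entry;
      void* c2i_unverified_entry;
    };

    // One adapter per distinct signature fingerprint, created on first use.
    static std::map<std::string, AdapterEntry*> adapter_cache;

    AdapterEntry* get_adapter(const std::string& fingerprint) {
      auto it = adapter_cache.find(fingerprint);
      if (it != adapter_cache.end()) return it->second;  // reuse the shared adapter
      AdapterEntry* entry = new AdapterEntry();          // code generation would go here
      adapter_cache.emplace(fingerprint, entry);
      return entry;
    }
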
@@ -3319,6 +3328,10 @@
 
   // rax: exception handler
 
+  // Restore SP from BP if the exception PC is a MethodHandle call.
+  __ cmpl(Address(r15_thread, JavaThread::is_method_handle_exception_offset()), 0);
+  __ cmovptr(Assembler::notEqual, rsp, rbp);
+
   // We have a handler in rax (could be deopt blob).
   __ mov(r8, rax);
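
The cmovptr added above restores rsp from rbp only when the thread-local flag marks the faulting PC as a MethodHandle call, keeping the exception path branch-free. The same selection pattern in plain C++ (names illustrative):

    #include <cstdint>

    // Compilers typically lower this ternary to a conditional move, which is
    // the branch-free rsp/rbp selection the stub performs.
    uintptr_t select_sp(bool is_method_handle_call, uintptr_t rsp, uintptr_t rbp) {
      return is_method_handle_call ? rbp : rsp;
    }
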
 
--- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -718,10 +718,8 @@
       case BarrierSet::G1SATBCTLogging:
         {
           __ pusha();                      // push registers
-          __ push(count);
-          __ push(start);
-          __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre)));
-          __ addptr(rsp, 2*wordSize);
+          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre),
+                          start, count);
           __ popa();
         }
         break;
@@ -752,10 +750,8 @@
       case BarrierSet::G1SATBCTLogging:
         {
           __ pusha();                      // push registers
-          __ push(count);
-          __ push(start);
-          __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)));
-          __ addptr(rsp, 2*wordSize);
+          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post),
+                          start, count);
           __ popa();
         }
         break;
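
Both barrier hunks replace hand-rolled push/call/add sequences with call_VM_leaf, which centralizes argument passing and stack alignment for leaf runtime calls. In C terms the runtime entry reached is a plain two-argument function; this stub is a stand-in with an assumed signature, not G1's real barrier:

    #include <cstddef>

    typedef void* HeapWordPtr;  // stand-in for HotSpot's HeapWord*

    void static_write_ref_array_pre(HeapWordPtr start, size_t count) {
      (void)start;
      (void)count;  // G1 would record the overwritten references here
    }
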
@@ -2030,6 +2026,54 @@
                                entry_checkcast_arraycopy);
   }
 
+  void generate_math_stubs() {
+    {
+      StubCodeMark mark(this, "StubRoutines", "log");
+      StubRoutines::_intrinsic_log = (double (*)(double)) __ pc();
+
+      __ fld_d(Address(rsp, 4));
+      __ flog();
+      __ ret(0);
+    }
+    {
+      StubCodeMark mark(this, "StubRoutines", "log10");
+      StubRoutines::_intrinsic_log10 = (double (*)(double)) __ pc();
+
+      __ fld_d(Address(rsp, 4));
+      __ flog10();
+      __ ret(0);
+    }
+    {
+      StubCodeMark mark(this, "StubRoutines", "sin");
+      StubRoutines::_intrinsic_sin = (double (*)(double))  __ pc();
+
+      __ fld_d(Address(rsp, 4));
+      __ trigfunc('s');
+      __ ret(0);
+    }
+    {
+      StubCodeMark mark(this, "StubRoutines", "cos");
+      StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc();
+
+      __ fld_d(Address(rsp, 4));
+      __ trigfunc('c');
+      __ ret(0);
+    }
+    {
+      StubCodeMark mark(this, "StubRoutines", "tan");
+      StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc();
+
+      __ fld_d(Address(rsp, 4));
+      __ trigfunc('t');
+      __ ret(0);
+    }
+
+    // The intrinsic versions of these routines seem to return the same values
+    // as the strict versions.
+    StubRoutines::_intrinsic_exp = SharedRuntime::dexp;
+    StubRoutines::_intrinsic_pow = SharedRuntime::dpow;
+  }
+
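
On 32-bit x86 these stubs can work directly on the x87 stack: the caller's double sits at rsp+4 (just past the return address), fld_d pushes it onto the FPU stack, and the result comes back in st(0), which is already the 32-bit floating-point return convention. A minimal sketch of consuming such a stub table, with libm standing in for the generated code:

    #include <cmath>
    #include <cstdio>

    typedef double (*unary_math_fn)(double);

    // Stand-ins for StubRoutines::_intrinsic_log and friends; they point at
    // libm here rather than at generated stub code.
    static unary_math_fn intrinsic_log = std::log;
    static unary_math_fn intrinsic_sin = std::sin;

    int main() {
      std::printf("log(e) ~= %f, sin(0) = %f\n",
                  intrinsic_log(2.718281828), intrinsic_sin(0.0));
      return 0;
    }
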
  public:
   // Information about frame layout at time of blocking runtime call.
   // Note that we only have to preserve callee-saved registers since
@@ -2228,6 +2272,8 @@
         MethodHandles::generate_method_handle_stub(_masm, ek);
       }
     }
+
+    generate_math_stubs();
   }
 
 
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1172,7 +1172,7 @@
             __ movptr(c_rarg0, addr);
             __ movptr(c_rarg1, count);
           }
-          __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre)));
+          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
           __ popa();
         }
         break;
@@ -1212,7 +1212,7 @@
           __ shrptr(scratch, LogBytesPerHeapOop);  // convert to element count
           __ mov(c_rarg0, start);
           __ mov(c_rarg1, scratch);
-          __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)));
+          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
           __ popa();
         }
         break;
@@ -2731,6 +2731,79 @@
     StubRoutines::_arrayof_oop_arraycopy             = StubRoutines::_oop_arraycopy;
   }
 
+  void generate_math_stubs() {
+    {
+      StubCodeMark mark(this, "StubRoutines", "log");
+      StubRoutines::_intrinsic_log = (double (*)(double)) __ pc();
+
+      __ subq(rsp, 8);
+      __ movdbl(Address(rsp, 0), xmm0);
+      __ fld_d(Address(rsp, 0));
+      __ flog();
+      __ fstp_d(Address(rsp, 0));
+      __ movdbl(xmm0, Address(rsp, 0));
+      __ addq(rsp, 8);
+      __ ret(0);
+    }
+    {
+      StubCodeMark mark(this, "StubRoutines", "log10");
+      StubRoutines::_intrinsic_log10 = (double (*)(double)) __ pc();
+
+      __ subq(rsp, 8);
+      __ movdbl(Address(rsp, 0), xmm0);
+      __ fld_d(Address(rsp, 0));
+      __ flog10();
+      __ fstp_d(Address(rsp, 0));
+      __ movdbl(xmm0, Address(rsp, 0));
+      __ addq(rsp, 8);
+      __ ret(0);
+    }
+    {
+      StubCodeMark mark(this, "StubRoutines", "sin");
+      StubRoutines::_intrinsic_sin = (double (*)(double)) __ pc();
+
+      __ subq(rsp, 8);
+      __ movdbl(Address(rsp, 0), xmm0);
+      __ fld_d(Address(rsp, 0));
+      __ trigfunc('s');
+      __ fstp_d(Address(rsp, 0));
+      __ movdbl(xmm0, Address(rsp, 0));
+      __ addq(rsp, 8);
+      __ ret(0);
+    }
+    {
+      StubCodeMark mark(this, "StubRoutines", "cos");
+      StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc();
+
+      __ subq(rsp, 8);
+      __ movdbl(Address(rsp, 0), xmm0);
+      __ fld_d(Address(rsp, 0));
+      __ trigfunc('c');
+      __ fstp_d(Address(rsp, 0));
+      __ movdbl(xmm0, Address(rsp, 0));
+      __ addq(rsp, 8);
+      __ ret(0);
+    }
+    {
+      StubCodeMark mark(this, "StubRoutines", "tan");
+      StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc();
+
+      __ subq(rsp, 8);
+      __ movdbl(Address(rsp, 0), xmm0);
+      __ fld_d(Address(rsp, 0));
+      __ trigfunc('t');
+      __ fstp_d(Address(rsp, 0));
+      __ movdbl(xmm0, Address(rsp, 0));
+      __ addq(rsp, 8);
+      __ ret(0);
+    }
+
+    // The intrinsic versions of these routines seem to return the same values
+    // as the strict versions.
+    StubRoutines::_intrinsic_exp = SharedRuntime::dexp;
+    StubRoutines::_intrinsic_pow = SharedRuntime::dpow;
+  }
+
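
Unlike the 32-bit stubs, the 64-bit versions must shuttle the operand between register files: the argument arrives in xmm0 under the SSE calling convention, but the transcendental instructions exist only on the x87 stack. A C-level sketch of that round trip through an 8-byte stack slot (libm stands in for the x87 instruction, since the data movement is the point):

    #include <cmath>
    #include <cstring>

    double stub_log_equivalent(double xmm0_arg) {
      unsigned char slot[8];                 // the 8-byte slot carved at rsp
      std::memcpy(slot, &xmm0_arg, 8);       // movdbl [rsp], xmm0
      double x87_value;
      std::memcpy(&x87_value, slot, 8);      // fld_d [rsp]
      double result = std::log(x87_value);   // flog (x87 transcendental)
      std::memcpy(slot, &result, 8);         // fstp_d [rsp]
      double xmm0_result;
      std::memcpy(&xmm0_result, slot, 8);    // movdbl xmm0, [rsp]
      return xmm0_result;                    // returned in xmm0
    }
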
 #undef __
 #define __ masm->
 
@@ -2935,6 +3008,18 @@
 
     // arraycopy stubs used by compilers
     generate_arraycopy_stubs();
+
+    // generic method handle stubs
+    if (EnableMethodHandles && SystemDictionary::MethodHandle_klass() != NULL) {
+      for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
+           ek < MethodHandles::_EK_LIMIT;
+           ek = MethodHandles::EntryKind(1 + (int)ek)) {
+        StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
+        MethodHandles::generate_method_handle_stub(_masm, ek);
+      }
+    }
+
+    generate_math_stubs();
   }
 
  public:
--- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -155,15 +155,8 @@
 }
 
 
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, bool unbox) {
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
   TosState incoming_state = state;
-  if (EnableInvokeDynamic) {
-    if (unbox) {
-      incoming_state = atos;
-    }
-  } else {
-    assert(!unbox, "old behavior");
-  }
 
   Label interpreter_entry;
   address compiled_entry = __ pc();
@@ -216,46 +209,6 @@
   __ restore_bcp();
   __ restore_locals();
 
-  Label L_fail;
-
-  if (unbox && state != atos) {
-    // cast and unbox
-    BasicType type = as_BasicType(state);
-    if (type == T_BYTE)  type = T_BOOLEAN; // FIXME
-    KlassHandle boxk = SystemDictionaryHandles::box_klass(type);
-    __ mov32(rbx, ExternalAddress((address) boxk.raw_value()));
-    __ testl(rax, rax);
-    Label L_got_value, L_get_value;
-    // convert nulls to zeroes (avoid NPEs here)
-    if (!(type == T_FLOAT || type == T_DOUBLE)) {
-      // if rax already contains zero bits, forge ahead
-      __ jcc(Assembler::zero, L_got_value);
-    } else {
-      __ jcc(Assembler::notZero, L_get_value);
-      __ fldz();
-      __ jmp(L_got_value);
-    }
-    __ bind(L_get_value);
-    __ cmp32(rbx, Address(rax, oopDesc::klass_offset_in_bytes()));
-    __ jcc(Assembler::notEqual, L_fail);
-    int offset = java_lang_boxing_object::value_offset_in_bytes(type);
-    // Cf. TemplateTable::getfield_or_static
-    switch (type) {
-      case T_BYTE:     // fall through:
-      case T_BOOLEAN:  __ load_signed_byte(rax, Address(rax, offset));    break;
-      case T_CHAR:     __ load_unsigned_short(rax, Address(rax, offset)); break;
-      case T_SHORT:    __ load_signed_short(rax, Address(rax, offset));   break;
-      case T_INT:      __ movl(rax, Address(rax, offset));                break;
-      case T_FLOAT:    __ fld_s(Address(rax, offset));                    break;
-      case T_DOUBLE:   __ fld_d(Address(rax, offset));                    break;
-      // Access to java.lang.Double.value does not need to be atomic:
-      case T_LONG:   { __ movl(rdx, Address(rax, offset + 4));
-                       __ movl(rax, Address(rax, offset + 0));  }         break;
-      default: ShouldNotReachHere();
-    }
-    __ bind(L_got_value);
-  }
-
   Label L_got_cache, L_giant_index;
   if (EnableInvokeDynamic) {
     __ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
@@ -263,32 +216,6 @@
   }
   __ get_cache_and_index_at_bcp(rbx, rcx, 1, false);
   __ bind(L_got_cache);
-  if (unbox && state == atos) {
-    // insert a casting conversion, to keep verifier sane
-    Label L_ok, L_ok_pops;
-    __ testl(rax, rax);
-    __ jcc(Assembler::zero, L_ok);
-    __ push(rax);               // save the object to check
-    __ push(rbx);               // save CP cache reference
-    __ movl(rdx, Address(rax, oopDesc::klass_offset_in_bytes()));
-    __ movl(rbx, Address(rbx, rcx,
-                      Address::times_4, constantPoolCacheOopDesc::base_offset() +
-                      ConstantPoolCacheEntry::f1_offset()));
-    __ movl(rbx, Address(rbx, __ delayed_value(sun_dyn_CallSiteImpl::type_offset_in_bytes, rcx)));
-    __ movl(rbx, Address(rbx, __ delayed_value(java_dyn_MethodType::rtype_offset_in_bytes, rcx)));
-    __ movl(rax, Address(rbx, __ delayed_value(java_lang_Class::klass_offset_in_bytes, rcx)));
-    __ check_klass_subtype(rdx, rax, rbx, L_ok_pops);
-    __ pop(rcx);                // pop and discard CP cache
-    __ mov(rbx, rax);           // target supertype into rbx for L_fail
-    __ pop(rax);                // failed object into rax for L_fail
-    __ jmp(L_fail);
-
-    __ bind(L_ok_pops);
-    // restore pushed temp regs:
-    __ pop(rbx);
-    __ pop(rax);
-    __ bind(L_ok);
-  }
   __ movl(rbx, Address(rbx, rcx,
                     Address::times_ptr, constantPoolCacheOopDesc::base_offset() +
                     ConstantPoolCacheEntry::flags_offset()));
@@ -301,14 +228,6 @@
     __ bind(L_giant_index);
     __ get_cache_and_index_at_bcp(rbx, rcx, 1, true);
     __ jmp(L_got_cache);
-
-    if (unbox) {
-      __ bind(L_fail);
-      __ push(rbx);             // missed klass (required)
-      __ push(rax);             // bad object (actual)
-      __ movptr(rdx, ExternalAddress((address) &Interpreter::_throw_WrongMethodType_entry));
-      __ call(rdx);
-    }
   }
 
   return entry;
@@ -1512,6 +1431,23 @@
 
 }
 
+// These methods should never be compiled: if they were, the compiled version
+// would be preferred over the faster intrinsic entry that the interpreter provides.
+bool AbstractInterpreter::can_be_compiled(methodHandle m) {
+  switch (method_kind(m)) {
+    case Interpreter::java_lang_math_sin     : // fall thru
+    case Interpreter::java_lang_math_cos     : // fall thru
+    case Interpreter::java_lang_math_tan     : // fall thru
+    case Interpreter::java_lang_math_abs     : // fall thru
+    case Interpreter::java_lang_math_log     : // fall thru
+    case Interpreter::java_lang_math_log10   : // fall thru
+    case Interpreter::java_lang_math_sqrt    :
+      return false;
+    default:
+      return true;
+  }
+}
+
 // How much stack a method activation needs in words.
 int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
 
@@ -1569,7 +1505,10 @@
 
   if (interpreter_frame != NULL) {
 #ifdef ASSERT
-    assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
+    if (!EnableMethodHandles)
+      // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
+      // Probably, since deoptimization doesn't work yet.
+      assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
     assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
 #endif
 
--- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -100,21 +100,26 @@
   return entry;
 }
 
-// Arguments are: required type in rarg1, failing object (or NULL) in rarg2
+// Arguments are: failing object (or NULL) at TOS, required type at TOS+8.
 address TemplateInterpreterGenerator::generate_WrongMethodType_handler() {
   address entry = __ pc();
 
   __ pop(c_rarg2);              // failing object is at TOS
   __ pop(c_rarg1);              // required type is at TOS+8
 
-  // expression stack must be empty before entering the VM if an
-  // exception happened
+  __ verify_oop(c_rarg1);
+  __ verify_oop(c_rarg2);
+
+  // Various method handle types use interpreter registers as temps.
+  __ restore_bcp();
+  __ restore_locals();
+
+  // Expression stack must be empty before entering the VM for an exception.
   __ empty_expression_stack();
 
   __ call_VM(noreg,
              CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::
-                              throw_WrongMethodTypeException),
+                              InterpreterRuntime::throw_WrongMethodTypeException),
              // pass required type, failing object (or NULL)
              c_rarg1, c_rarg2);
   return entry;
@@ -166,8 +171,7 @@
 
 
 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
-                                                                int step, bool unbox) {
-  assert(!unbox, "NYI");//6815692//
+                                                                int step) {
 
   // amd64 doesn't need to do anything special about compiled returns
   // to the interpreter so the code that exists on x86 to place a sentinel
@@ -183,15 +187,29 @@
   __ restore_bcp();
   __ restore_locals();
 
-  __ get_cache_and_index_at_bcp(rbx, rcx, 1);
+  Label L_got_cache, L_giant_index;
+  if (EnableInvokeDynamic) {
+    __ cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
+    __ jcc(Assembler::equal, L_giant_index);
+  }
+  __ get_cache_and_index_at_bcp(rbx, rcx, 1, false);
+  __ bind(L_got_cache);
   __ movl(rbx, Address(rbx, rcx,
-                       Address::times_8,
+                       Address::times_ptr,
                        in_bytes(constantPoolCacheOopDesc::base_offset()) +
                        3 * wordSize));
   __ andl(rbx, 0xFF);
   if (TaggedStackInterpreter) __ shll(rbx, 1); // 2 slots per parameter.
   __ lea(rsp, Address(rsp, rbx, Address::times_8));
   __ dispatch_next(state, step);
+
+  // out of the main line of code...
+  if (EnableInvokeDynamic) {
+    __ bind(L_giant_index);
+    __ get_cache_and_index_at_bcp(rbx, rcx, 1, true);
+    __ jmp(L_got_cache);
+  }
+
   return entry;
 }
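
The L_giant_index path exists because invokedynamic carries a wider constant-pool cache index after the opcode than the other invoke bytecodes do. A sketch of the width difference (the exact operand layout is an assumption here; x86 is little-endian, so copying into the low bytes works for both widths):

    #include <cstdint>
    #include <cstring>

    uint32_t cache_index_at_bcp(const uint8_t* bcp, bool giant_index) {
      uint32_t index = 0;  // little-endian: low bytes land in the right place
      std::memcpy(&index, bcp + 1, giant_index ? 4 : 2);
      return index;
    }
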
 
@@ -431,8 +449,12 @@
   __ addptr(rax, stack_base);
   __ subptr(rax, stack_size);
 
+  // Use the maximum number of pages we might bang.
+  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
+                                                                              (StackRedPages+StackYellowPages);
+
   // add in the red and yellow zone sizes
-  __ addptr(rax, (StackRedPages + StackYellowPages) * page_size);
+  __ addptr(rax, max_pages * page_size);
 
   // check against the current stack bottom
   __ cmpptr(rsp, rax);
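
With the zero-port change later in this patch, StackShadowPages can exceed StackRedPages+StackYellowPages, so the bang distance must be the larger of the two. The bound in isolation (flag values here are assumed examples, not VM defaults):

    #include <algorithm>
    #include <cstdio>

    int main() {
      const int stack_shadow_pages = 6;  // assumed example values
      const int stack_red_pages    = 1;
      const int stack_yellow_pages = 2;
      const int max_pages =
          std::max(stack_shadow_pages, stack_red_pages + stack_yellow_pages);
      std::printf("bang %d pages beyond the frame\n", max_pages);
      return 0;
    }
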
@@ -1434,6 +1456,23 @@
                                 generate_normal_entry(synchronized);
 }
 
+// These methods should never be compiled: if they were, the compiled version
+// would be preferred over the faster intrinsic entry that the interpreter provides.
+bool AbstractInterpreter::can_be_compiled(methodHandle m) {
+  switch (method_kind(m)) {
+    case Interpreter::java_lang_math_sin     : // fall thru
+    case Interpreter::java_lang_math_cos     : // fall thru
+    case Interpreter::java_lang_math_tan     : // fall thru
+    case Interpreter::java_lang_math_abs     : // fall thru
+    case Interpreter::java_lang_math_log     : // fall thru
+    case Interpreter::java_lang_math_log10   : // fall thru
+    case Interpreter::java_lang_math_sqrt    :
+      return false;
+    default:
+      return true;
+  }
+}
+
 // How much stack a method activation needs in words.
 int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
   const int entry_size = frame::interpreter_frame_monitor_size();
@@ -1484,8 +1523,10 @@
          tempcount* Interpreter::stackElementWords() + popframe_extra_args;
   if (interpreter_frame != NULL) {
 #ifdef ASSERT
-    assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(),
-           "Frame not properly walkable");
+    if (!EnableMethodHandles)
+      // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
+      // Probably, since deoptimization doesn't work yet.
+      assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
     assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
 #endif
 
--- a/src/cpu/x86/vm/templateTable_x86_32.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/templateTable_x86_32.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -2890,9 +2890,6 @@
 
 
 void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
-  bool is_invdyn_bootstrap = (byte_no < 0);
-  if (is_invdyn_bootstrap)  byte_no = -byte_no;
-
   // determine flags
   Bytecodes::Code code = bytecode();
   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
@@ -2907,8 +2904,6 @@
   const Register flags  = rdx;
   assert_different_registers(method, index, recv, flags);
 
-  assert(!is_invdyn_bootstrap || is_invokedynamic, "byte_no<0 hack only for invdyn");
-
   // save 'interpreter return address'
   __ save_bcp();
 
@@ -2944,9 +2939,7 @@
   // load return address
   {
     address table_addr;
-    if (is_invdyn_bootstrap)
-      table_addr = (address)Interpreter::return_5_unbox_addrs_by_index_table();
-    else if (is_invokeinterface || is_invokedynamic)
+    if (is_invokeinterface || is_invokedynamic)
       table_addr = (address)Interpreter::return_5_addrs_by_index_table();
     else
       table_addr = (address)Interpreter::return_3_addrs_by_index_table();
@@ -3153,54 +3146,10 @@
     __ profile_call(rsi);
   }
 
-  Label handle_unlinked_site;
-  __ movptr(rcx, Address(rax, __ delayed_value(sun_dyn_CallSiteImpl::target_offset_in_bytes, rcx)));
-  __ testptr(rcx, rcx);
-  __ jcc(Assembler::zero, handle_unlinked_site);
-
+  __ movptr(rcx, Address(rax, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
+  __ null_check(rcx);
   __ prepare_to_jump_from_interpreted();
   __ jump_to_method_handle_entry(rcx, rdx);
-
-  // Initial calls come here...
-  __ bind(handle_unlinked_site);
-  __ pop(rcx);                 // remove return address pushed by prepare_invoke
-
-  // box stacked arguments into an array for the bootstrap method
-  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::bootstrap_invokedynamic);
-  __ restore_bcp();      // rsi must be correct for call_VM
-  __ call_VM(rax, entry, rax);
-  __ movl(rdi, rax);            // protect bootstrap MH from prepare_invoke
-
-  // recompute return address
-  __ restore_bcp();      // rsi must be correct for prepare_invoke
-  prepare_invoke(rax, rbx, -byte_no);  // smashes rcx, rdx
-  // rax: CallSite object (f1)
-  // rbx: unused (f2)
-  // rdi: bootstrap MH
-  // rdx: flags
-
-  // now load up the arglist, which has been neatly boxed
-  __ get_thread(rcx);
-  __ movptr(rdx, Address(rcx, JavaThread::vm_result_2_offset()));
-  __ movptr(Address(rcx, JavaThread::vm_result_2_offset()), NULL_WORD);
-  __ verify_oop(rdx);
-  // rdx = arglist
-
-  // save SP now, before we add the bootstrap call to the stack
-  // We must preserve a fiction that the original arguments are outgoing,
-  // because the return sequence will reset the stack to this point
-  // and then pop all those arguments.  It seems error-prone to use
-  // a different argument list size just for bootstrapping.
-  __ prepare_to_jump_from_interpreted();
-
-  // Now let's play adapter, pushing the real arguments on the stack.
-  __ pop(rbx);                  // return PC
-  __ push(rdi);                 // boot MH
-  __ push(rax);                 // call site
-  __ push(rdx);                 // arglist
-  __ push(rbx);                 // return PC, again
-  __ mov(rcx, rdi);
-  __ jump_to_method_handle_entry(rcx, rdx);
 }
 
 //----------------------------------------------------------------------------------------------------
--- a/src/cpu/x86/vm/templateTable_x86_64.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/templateTable_x86_64.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -203,18 +203,15 @@
     __ jcc(Assembler::notEqual, fast_patch);
     __ get_method(scratch);
     // Let breakpoint table handling rewrite to quicker bytecode
-    __ call_VM(noreg,
-               CAST_FROM_FN_PTR(address,
-                                InterpreterRuntime::set_original_bytecode_at),
-               scratch, r13, bc);
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, r13, bc);
 #ifndef ASSERT
     __ jmpb(patch_done);
+#else
+    __ jmp(patch_done);
+#endif
     __ bind(fast_patch);
   }
-#else
-    __ jmp(patch_done);
-    __ bind(fast_patch);
-  }
+#ifdef ASSERT
   Label okay;
   __ load_unsigned_byte(scratch, at_bcp(0));
   __ cmpl(scratch, (int) Bytecodes::java_code(bytecode));
@@ -2054,26 +2051,28 @@
   }
 }
 
-void TemplateTable::resolve_cache_and_index(int byte_no,
-                                            Register Rcache,
-                                            Register index) {
+void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
   assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
+  bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic);
 
   const Register temp = rbx;
   assert_different_registers(Rcache, index, temp);
 
   const int shift_count = (1 + byte_no) * BitsPerByte;
   Label resolved;
-  __ get_cache_and_index_at_bcp(Rcache, index, 1);
-  __ movl(temp, Address(Rcache,
-                        index, Address::times_8,
-                        constantPoolCacheOopDesc::base_offset() +
-                        ConstantPoolCacheEntry::indices_offset()));
-  __ shrl(temp, shift_count);
-  // have we resolved this bytecode?
-  __ andl(temp, 0xFF);
-  __ cmpl(temp, (int) bytecode());
-  __ jcc(Assembler::equal, resolved);
+  __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
+  if (is_invokedynamic) {
+    // we are resolved if the f1 field contains a non-null CallSite object
+    __ cmpptr(Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()), (int32_t) NULL_WORD);
+    __ jcc(Assembler::notEqual, resolved);
+  } else {
+    __ movl(temp, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
+    __ shrl(temp, shift_count);
+    // have we resolved this bytecode?
+    __ andl(temp, 0xFF);
+    __ cmpl(temp, (int) bytecode());
+    __ jcc(Assembler::equal, resolved);
+  }
 
   // resolve first time through
   address entry;
@@ -2090,6 +2089,9 @@
   case Bytecodes::_invokeinterface:
     entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);
     break;
+  case Bytecodes::_invokedynamic:
+    entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
+    break;
   default:
     ShouldNotReachHere();
     break;
@@ -2098,7 +2100,7 @@
   __ call_VM(noreg, entry, temp);
 
   // Update registers with resolved info
-  __ get_cache_and_index_at_bcp(Rcache, index, 1);
+  __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
   __ bind(resolved);
 }
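
The two resolution tests reduce to this: an invokedynamic site is linked once its f1 slot holds a CallSite, while every other invoke compares the bytecode number cached in the indices word. A hedged sketch (field layout assumed, not the VM's exact cache entry):

    #include <cstdint>

    struct CPCacheEntry {
      void*    f1;       // CallSite oop once an invokedynamic site is linked
      uint32_t indices;  // packed bytecode numbers for the other invoke forms
    };

    bool is_resolved(const CPCacheEntry& e, int bytecode,
                     int shift_count, bool is_invokedynamic) {
      if (is_invokedynamic) return e.f1 != nullptr;
      return ((e.indices >> shift_count) & 0xFF) == (uint32_t)bytecode;
    }
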
 
@@ -2832,15 +2834,14 @@
   ShouldNotReachHere();
 }
 
-void TemplateTable::prepare_invoke(Register method,
-                                   Register index,
-                                   int byte_no,
-                                   Bytecodes::Code code) {
+void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
   // determine flags
+  Bytecodes::Code code = bytecode();
   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
+  const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
-  const bool load_receiver       = code != Bytecodes::_invokestatic;
+  const bool load_receiver       = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic);
   const bool receiver_null_check = is_invokespecial;
   const bool save_flags = is_invokeinterface || is_invokevirtual;
   // setup registers & access constant pool cache
@@ -2858,9 +2859,13 @@
     __ movl(recv, flags);
     __ andl(recv, 0xFF);
     if (TaggedStackInterpreter) __ shll(recv, 1);  // index*2
-    __ movptr(recv, Address(rsp, recv, Address::times_8,
-                                 -Interpreter::expr_offset_in_bytes(1)));
-    __ verify_oop(recv);
+    Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1));
+    if (is_invokedynamic) {
+      __ lea(recv, recv_addr);
+    } else {
+      __ movptr(recv, recv_addr);
+      __ verify_oop(recv);
+    }
   }
 
   // do null check if needed
@@ -2878,10 +2883,14 @@
   ConstantPoolCacheEntry::verify_tosBits();
   // load return address
   {
-    ExternalAddress return_5((address)Interpreter::return_5_addrs_by_index_table());
-    ExternalAddress return_3((address)Interpreter::return_3_addrs_by_index_table());
-    __ lea(rscratch1, (is_invokeinterface ? return_5 : return_3));
-    __ movptr(flags, Address(rscratch1, flags, Address::times_8));
+    address table_addr;
+    if (is_invokeinterface || is_invokedynamic)
+      table_addr = (address)Interpreter::return_5_addrs_by_index_table();
+    else
+      table_addr = (address)Interpreter::return_3_addrs_by_index_table();
+    ExternalAddress table(table_addr);
+    __ lea(rscratch1, table);
+    __ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
   }
 
   // push return address
@@ -2947,7 +2956,7 @@
 
 void TemplateTable::invokevirtual(int byte_no) {
   transition(vtos, vtos);
-  prepare_invoke(rbx, noreg, byte_no, bytecode());
+  prepare_invoke(rbx, noreg, byte_no);
 
   // rbx: index
   // rcx: receiver
@@ -2959,7 +2968,7 @@
 
 void TemplateTable::invokespecial(int byte_no) {
   transition(vtos, vtos);
-  prepare_invoke(rbx, noreg, byte_no, bytecode());
+  prepare_invoke(rbx, noreg, byte_no);
   // do the call
   __ verify_oop(rbx);
   __ profile_call(rax);
@@ -2969,7 +2978,7 @@
 
 void TemplateTable::invokestatic(int byte_no) {
   transition(vtos, vtos);
-  prepare_invoke(rbx, noreg, byte_no, bytecode());
+  prepare_invoke(rbx, noreg, byte_no);
   // do the call
   __ verify_oop(rbx);
   __ profile_call(rax);
@@ -2983,7 +2992,7 @@
 
 void TemplateTable::invokeinterface(int byte_no) {
   transition(vtos, vtos);
-  prepare_invoke(rax, rbx, byte_no, bytecode());
+  prepare_invoke(rax, rbx, byte_no);
 
   // rax: Interface
   // rbx: index
@@ -3072,7 +3081,24 @@
     return;
   }
 
-  __ stop("invokedynamic NYI");//6815692//
+  prepare_invoke(rax, rbx, byte_no);
+
+  // rax: CallSite object (f1)
+  // rbx: unused (f2)
+  // rcx: receiver address
+  // rdx: flags (unused)
+
+  if (ProfileInterpreter) {
+    Label L;
+    // %%% should make a type profile for any invokedynamic that takes a ref argument
+    // profile this call
+    __ profile_call(r13);
+  }
+
+  __ movptr(rcx, Address(rax, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
+  __ null_check(rcx);
+  __ prepare_to_jump_from_interpreted();
+  __ jump_to_method_handle_entry(rcx, rdx);
 }
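
At this point the 64-bit interpreter has the same shape as the 32-bit version above: load the call site's current target, null-check it, and jump through the method handle. The control flow in illustrative C++ (types are stand-ins, not the VM's oop layouts):

    #include <cstdlib>

    struct MethodHandle { void (*invoke_entry)(); };
    struct CallSite     { MethodHandle* target; };

    void dispatch_invokedynamic(CallSite* site) {
      MethodHandle* mh = site->target;
      if (mh == nullptr) std::abort();  // the stub's null_check -> NPE path
      mh->invoke_entry();               // jump_to_method_handle_entry
    }
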
 
 
@@ -3212,17 +3238,19 @@
     __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
     __ store_klass_gap(rax, rcx);  // zero klass gap for compressed oops
     __ store_klass(rax, rsi);      // store klass last
+
+    {
+      SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
+      // Trigger dtrace event for fastpath
+      __ push(atos); // save the return value
+      __ call_VM_leaf(
+           CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
+      __ pop(atos); // restore the return value
+
+    }
     __ jmp(done);
   }
 
-  {
-    SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
-    // Trigger dtrace event for fastpath
-    __ push(atos); // save the return value
-    __ call_VM_leaf(
-         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
-    __ pop(atos); // restore the return value
-  }
 
   // slow case
   __ bind(slow_case);
--- a/src/cpu/x86/vm/templateTable_x86_64.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/templateTable_x86_64.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -22,8 +22,7 @@
  *
  */
 
-  static void prepare_invoke(Register method, Register index, int byte_no,
-                             Bytecodes::Code code);
+  static void prepare_invoke(Register method, Register index, int byte_no);
   static void invokevirtual_helper(Register index, Register recv,
                                    Register flags);
   static void volatile_barrier(Assembler::Membar_mask_bits order_constraint);
--- a/src/cpu/x86/vm/vm_version_x86.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/vm_version_x86.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -255,6 +255,8 @@
   if (!VM_Version::supports_sse2()) {
     vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported");
   }
+  // In 64-bit mode SSE2 is the minimum supported level.
+  if (UseSSE < 2) UseSSE = 2;
 #endif
 
   // If the OS doesn't support SSE, we can't use this feature even if the HW does
--- a/src/cpu/x86/vm/x86_32.ad	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/x86_32.ad	Wed Mar 24 17:16:33 2010 -0700
@@ -235,6 +235,11 @@
 //----------SOURCE BLOCK-------------------------------------------------------
 // This is a block of C++ code which provides values, functions, and
 // definitions necessary in the rest of the architecture description
+source_hpp %{
+// Must be visible to the DFA in dfa_x86_32.cpp
+extern bool is_operand_hi32_zero(Node* n);
+%}
+
 source %{
 #define   RELOC_IMM32    Assembler::imm_operand
 #define   RELOC_DISP32   Assembler::disp32_operand
@@ -268,22 +273,36 @@
 static jlong *float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], CONST64(0x8000000080000000), CONST64(0x8000000080000000));
 static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
 
+// Offset hacking within calls.
+static int pre_call_FPU_size() {
+  if (Compile::current()->in_24_bit_fp_mode())
+    return 6; // fldcw
+  return 0;
+}
+
+static int preserve_SP_size() {
+  return LP64_ONLY(1 +) 2;  // [rex,] op, rm(reg/reg)
+}
+
 // !!!!! Special hack to get all types of calls to specify the byte offset
 //       from the start of the call to the point where the return address
 //       will point.
 int MachCallStaticJavaNode::ret_addr_offset() {
-  return 5 + (Compile::current()->in_24_bit_fp_mode() ? 6 : 0);  // 5 bytes from start of call to where return address points
+  int offset = 5 + pre_call_FPU_size();  // 5 bytes from start of call to where return address points
+  if (_method_handle_invoke)
+    offset += preserve_SP_size();
+  return offset;
 }
 
 int MachCallDynamicJavaNode::ret_addr_offset() {
-  return 10 + (Compile::current()->in_24_bit_fp_mode() ? 6 : 0);  // 10 bytes from start of call to where return address points
+  return 10 + pre_call_FPU_size();  // 10 bytes from start of call to where return address points
 }
 
 static int sizeof_FFree_Float_Stack_All = -1;
 
 int MachCallRuntimeNode::ret_addr_offset() {
   assert(sizeof_FFree_Float_Stack_All != -1, "must have been emitted already");
-  return sizeof_FFree_Float_Stack_All + 5 + (Compile::current()->in_24_bit_fp_mode() ? 6 : 0);
+  return sizeof_FFree_Float_Stack_All + 5 + pre_call_FPU_size();
 }
 
 // Indicate if the safepoint node needs the polling page as an input.
@@ -299,8 +318,16 @@
 // The address of the call instruction needs to be 4-byte aligned to
 // ensure that it does not span a cache line so that it can be patched.
 int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
-  if (Compile::current()->in_24_bit_fp_mode())
-    current_offset += 6;    // skip fldcw in pre_call_FPU, if any
+  current_offset += pre_call_FPU_size();  // skip fldcw, if any
+  current_offset += 1;      // skip call opcode byte
+  return round_to(current_offset, alignment_required()) - current_offset;
+}
+
+// The address of the call instruction needs to be 4-byte aligned to
+// ensure that it does not span a cache line so that it can be patched.
+int CallStaticJavaHandleNode::compute_padding(int current_offset) const {
+  current_offset += pre_call_FPU_size();  // skip fldcw, if any
+  current_offset += preserve_SP_size();   // skip mov rbp, rsp
   current_offset += 1;      // skip call opcode byte
   return round_to(current_offset, alignment_required()) - current_offset;
 }
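
All of these compute_padding variants follow one rule: skip whatever bytes precede the call's 4-byte displacement (the optional fldcw, the mov rbp,rsp from preserve_SP, the call opcode), then round up to the required alignment. As a standalone function:

    // The division form below works for any positive alignment; for a
    // power-of-two alignment a mask would do the same job.
    int call_padding(int current_offset, int pre_bytes, int alignment) {
      current_offset += pre_bytes + 1;  // pre-call bytes + the call opcode byte
      int rounded = (current_offset + alignment - 1) / alignment * alignment;
      return rounded - current_offset;
    }
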
@@ -308,8 +335,7 @@
 // The address of the call instruction needs to be 4-byte aligned to
 // ensure that it does not span a cache line so that it can be patched.
 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
-  if (Compile::current()->in_24_bit_fp_mode())
-    current_offset += 6;    // skip fldcw in pre_call_FPU, if any
+  current_offset += pre_call_FPU_size();  // skip fldcw, if any
   current_offset += 5;      // skip MOV instruction
   current_offset += 1;      // skip call opcode byte
   return round_to(current_offset, alignment_required()) - current_offset;
@@ -1460,6 +1486,25 @@
   return RegMask();
 }
 
+const RegMask Matcher::method_handle_invoke_SP_save_mask() {
+  return EBP_REG_mask;
+}
+
+// Returns true if the high 32 bits of the value is known to be zero.
+bool is_operand_hi32_zero(Node* n) {
+  int opc = n->Opcode();
+  if (opc == Op_LoadUI2L) {
+    return true;
+  }
+  if (opc == Op_AndL) {
+    Node* o2 = n->in(2);
+    if (o2->is_Con() && (o2->get_long() & 0xFFFFFFFF00000000LL) == 0LL) {
+      return true;
+    }
+  }
+  return false;
+}
+
 %}
 
 //----------ENCODING BLOCK-----------------------------------------------------
@@ -1772,10 +1817,13 @@
 
   enc_class pre_call_FPU %{
     // If method sets FPU control word restore it here
+    debug_only(int off0 = cbuf.code_size());
     if( Compile::current()->in_24_bit_fp_mode() ) {
       MacroAssembler masm(&cbuf);
       masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
     }
+    debug_only(int off1 = cbuf.code_size());
+    assert(off1 - off0 == pre_call_FPU_size(), "correct size prediction");
   %}
 
   enc_class post_call_FPU %{
@@ -1786,6 +1834,21 @@
     }
   %}
 
+  enc_class preserve_SP %{
+    debug_only(int off0 = cbuf.code_size());
+    MacroAssembler _masm(&cbuf);
+    // RBP is preserved across all calls, even compiled calls.
+    // Use it to preserve RSP in places where the callee might change the SP.
+    __ movptr(rbp, rsp);
+    debug_only(int off1 = cbuf.code_size());
+    assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
+  %}
+
+  enc_class restore_SP %{
+    MacroAssembler _masm(&cbuf);
+    __ movptr(rsp, rbp);
+  %}
+
   enc_class Java_Static_Call (method meth) %{    // JAVA STATIC CALL
     // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
     // who we intended to call.
@@ -8556,6 +8619,63 @@
   ins_pipe( pipe_slow );
 %}
 
+// Multiply Register Long where the left operand's high 32 bits are zero
+instruct mulL_eReg_lhi0(eADXRegL dst, eRegL src, eRegI tmp, eFlagsReg cr) %{
+  predicate(is_operand_hi32_zero(n->in(1)));
+  match(Set dst (MulL dst src));
+  effect(KILL cr, TEMP tmp);
+  ins_cost(2*100+2*400);
+// Basic idea: lo(result) = lo(x_lo * y_lo)
+//             hi(result) = hi(x_lo * y_lo) + lo(x_lo * y_hi) where lo(x_hi * y_lo) = 0 because x_hi = 0
+  format %{ "MOV    $tmp,$src.hi\n\t"
+            "IMUL   $tmp,EAX\n\t"
+            "MUL    EDX:EAX,$src.lo\n\t"
+            "ADD    EDX,$tmp" %}
+  ins_encode %{
+    __ movl($tmp$$Register, HIGH_FROM_LOW($src$$Register));
+    __ imull($tmp$$Register, rax);
+    __ mull($src$$Register);
+    __ addl(rdx, $tmp$$Register);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+// Multiply Register Long where the right operand's high 32 bits are zero
+instruct mulL_eReg_rhi0(eADXRegL dst, eRegL src, eRegI tmp, eFlagsReg cr) %{
+  predicate(is_operand_hi32_zero(n->in(2)));
+  match(Set dst (MulL dst src));
+  effect(KILL cr, TEMP tmp);
+  ins_cost(2*100+2*400);
+// Basic idea: lo(result) = lo(x_lo * y_lo)
+//             hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) where lo(x_lo * y_hi) = 0 because y_hi = 0
+  format %{ "MOV    $tmp,$src.lo\n\t"
+            "IMUL   $tmp,EDX\n\t"
+            "MUL    EDX:EAX,$src.lo\n\t"
+            "ADD    EDX,$tmp" %}
+  ins_encode %{
+    __ movl($tmp$$Register, $src$$Register);
+    __ imull($tmp$$Register, rdx);
+    __ mull($src$$Register);
+    __ addl(rdx, $tmp$$Register);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+// Multiply Register Long where the left and the right operands' high 32 bits are zero
+instruct mulL_eReg_hi0(eADXRegL dst, eRegL src, eFlagsReg cr) %{
+  predicate(is_operand_hi32_zero(n->in(1)) && is_operand_hi32_zero(n->in(2)));
+  match(Set dst (MulL dst src));
+  effect(KILL cr);
+  ins_cost(1*400);
+// Basic idea: lo(result) = lo(x_lo * y_lo)
+//             hi(result) = hi(x_lo * y_lo) where lo(x_hi * y_lo) = 0 and lo(x_lo * y_hi) = 0 because x_hi = 0 and y_hi = 0
+  format %{ "MUL    EDX:EAX,$src.lo\n\t" %}
+  ins_encode %{
+    __ mull($src$$Register);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
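
All three new rules lean on the same identity: with x = x_hi*2^32 + x_lo and y = y_hi*2^32 + y_lo, the low 64 bits of x*y are x_lo*y_lo + ((x_lo*y_hi + x_hi*y_lo) << 32), and each cross term disappears when the corresponding high half is zero. Checked in plain C++:

    #include <cassert>
    #include <cstdint>

    uint64_t mul_via_parts(uint64_t x, uint64_t y) {
      uint32_t xl = (uint32_t)x, xh = (uint32_t)(x >> 32);
      uint32_t yl = (uint32_t)y, yh = (uint32_t)(y >> 32);
      uint64_t lo = (uint64_t)xl * yl;      // MUL  EDX:EAX <- x_lo * y_lo
      uint32_t cross = xl * yh + xh * yl;   // the IMUL'd cross terms
      return lo + ((uint64_t)cross << 32);  // ADD  EDX, tmp
    }

    int main() {
      assert(mul_via_parts(0x1234567890ULL, 0xABCD) == 0x1234567890ULL * 0xABCD);
      return 0;
    }
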
 // Multiply Register Long by small constant
 instruct mulL_eReg_con(eADXRegL dst, immL_127 src, eRegI tmp, eFlagsReg cr) %{
   match(Set dst (MulL dst src));
@@ -13406,6 +13526,7 @@
 //       compute_padding() functions will have to be adjusted.
 instruct CallStaticJavaDirect(method meth) %{
   match(CallStaticJava);
+  predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
   effect(USE meth);
 
   ins_cost(300);
@@ -13420,6 +13541,30 @@
   ins_alignment(4);
 %}
 
+// Call Java Static Instruction (method handle version)
+// Note: If this code changes, the corresponding ret_addr_offset() and
+//       compute_padding() functions will have to be adjusted.
+instruct CallStaticJavaHandle(method meth, eBPRegP ebp) %{
+  match(CallStaticJava);
+  predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
+  effect(USE meth);
+  // EBP is saved by all callees (for interpreter stack correction).
+  // We use it here for a similar purpose, in {preserve,restore}_SP.
+
+  ins_cost(300);
+  format %{ "CALL,static/MethodHandle " %}
+  opcode(0xE8); /* E8 cd */
+  ins_encode( pre_call_FPU,
+              preserve_SP,
+              Java_Static_Call( meth ),
+              restore_SP,
+              call_epilog,
+              post_call_FPU );
+  ins_pipe( pipe_slow );
+  ins_pc_relative(1);
+  ins_alignment(4);
+%}
+
 // Call Java Dynamic Instruction
 // Note: If this code changes, the corresponding ret_addr_offset() and
 //       compute_padding() functions will have to be adjusted.
--- a/src/cpu/x86/vm/x86_64.ad	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/x86/vm/x86_64.ad	Wed Mar 24 17:16:33 2010 -0700
@@ -551,12 +551,19 @@
 
 #define __ _masm.
 
+static int preserve_SP_size() {
+  return LP64_ONLY(1 +) 2;  // [rex,] op, rm(reg/reg)
+}
+
 // !!!!! Special hack to get all types of calls to specify the byte offset
 //       from the start of the call to the point where the return address
 //       will point.
 int MachCallStaticJavaNode::ret_addr_offset()
 {
-  return 5; // 5 bytes from start of call to where return address points
+  int offset = 5; // 5 bytes from start of call to where return address points
+  if (_method_handle_invoke)
+    offset += preserve_SP_size();
+  return offset;
 }
 
 int MachCallDynamicJavaNode::ret_addr_offset()
@@ -589,6 +596,15 @@
 
 // The address of the call instruction needs to be 4-byte aligned to
 // ensure that it does not span a cache line so that it can be patched.
+int CallStaticJavaHandleNode::compute_padding(int current_offset) const
+{
+  current_offset += preserve_SP_size();   // skip mov rbp, rsp
+  current_offset += 1; // skip call opcode byte
+  return round_to(current_offset, alignment_required()) - current_offset;
+}
+
+// The address of the call instruction needs to be 4-byte aligned to
+// ensure that it does not span a cache line so that it can be patched.
 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
 {
   current_offset += 11; // skip movq instruction + call opcode byte
@@ -2113,6 +2129,10 @@
   return LONG_RDX_REG_mask;
 }
 
+const RegMask Matcher::method_handle_invoke_SP_save_mask() {
+  return PTR_RBP_REG_mask;
+}
+
 static Address build_address(int b, int i, int s, int d) {
   Register index = as_Register(i);
   Address::ScaleFactor scale = (Address::ScaleFactor)s;
@@ -2608,6 +2628,21 @@
                    RELOC_DISP32);
   %}
 
+  enc_class preserve_SP %{
+    debug_only(int off0 = cbuf.code_size());
+    MacroAssembler _masm(&cbuf);
+    // RBP is preserved across all calls, even compiled calls.
+    // Use it to preserve RSP in places where the callee might change the SP.
+    __ movptr(rbp, rsp);
+    debug_only(int off1 = cbuf.code_size());
+    assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
+  %}
+
+  enc_class restore_SP %{
+    MacroAssembler _masm(&cbuf);
+    __ movptr(rsp, rbp);
+  %}
+
   enc_class Java_Static_Call(method meth)
   %{
     // JAVA STATIC CALL
@@ -12526,9 +12561,9 @@
 // Call Java Static Instruction
 // Note: If this code changes, the corresponding ret_addr_offset() and
 //       compute_padding() functions will have to be adjusted.
-instruct CallStaticJavaDirect(method meth)
-%{
+instruct CallStaticJavaDirect(method meth) %{
   match(CallStaticJava);
+  predicate(!((CallStaticJavaNode*) n)->is_method_handle_invoke());
   effect(USE meth);
 
   ins_cost(300);
@@ -12540,6 +12575,28 @@
   ins_alignment(4);
 %}
 
+// Call Java Static Instruction (method handle version)
+// Note: If this code changes, the corresponding ret_addr_offset() and
+//       compute_padding() functions will have to be adjusted.
+instruct CallStaticJavaHandle(method meth, rbp_RegP rbp) %{
+  match(CallStaticJava);
+  predicate(((CallStaticJavaNode*) n)->is_method_handle_invoke());
+  effect(USE meth);
+  // RBP is saved by all callees (for interpreter stack correction).
+  // We use it here for a similar purpose, in {preserve,restore}_SP.
+
+  ins_cost(300);
+  format %{ "call,static/MethodHandle " %}
+  opcode(0xE8); /* E8 cd */
+  ins_encode(preserve_SP,
+             Java_Static_Call(meth),
+             restore_SP,
+             call_epilog);
+  ins_pipe(pipe_slow);
+  ins_pc_relative(1);
+  ins_alignment(4);
+%}
+
 // Call Java Dynamic Instruction
 // Note: If this code changes, the corresponding ret_addr_offset() and
 //       compute_padding() functions will have to be adjusted.
--- a/src/cpu/zero/vm/cppInterpreter_zero.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008, 2009 Red Hat, Inc.
+ * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -145,7 +145,7 @@
     }
     else if (istate->msg() == BytecodeInterpreter::return_from_method) {
       // Copy the result into the caller's frame
-      result_slots = type2size[method->result_type()];
+      result_slots = type2size[result_type_of(method)];
       assert(result_slots >= 0 && result_slots <= 2, "what?");
       result = istate->stack() + result_slots;
       break;
@@ -204,6 +204,20 @@
     goto unwind_and_return;
   }
 
+  // Update the invocation counter
+  if ((UseCompiler || CountCompiledCalls) && !method->is_synchronized()) {
+    thread->set_do_not_unlock();
+    InvocationCounter *counter = method->invocation_counter();
+    counter->increment();
+    if (counter->reached_InvocationLimit()) {
+      CALL_VM_NOCHECK(
+        InterpreterRuntime::frequency_counter_overflow(thread, NULL));
+      if (HAS_PENDING_EXCEPTION)
+        goto unwind_and_return;
+    }
+    thread->clr_do_not_unlock();
+  }
+
   // Lock if necessary
   BasicObjectLock *monitor;
   monitor = NULL;
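
The C++ interpreter now mirrors the template interpreter's hot-method detection: bump the per-method invocation counter and, at the threshold, call into the runtime to request compilation. The policy in miniature (the limit is an assumed stand-in for the VM's InvocationCounter threshold):

    struct InvocationCounter {
      int count = 0;
      // Returns true when the caller should trigger frequency_counter_overflow.
      bool increment_and_check(int limit = 10000) {
        return ++count >= limit;
      }
    };
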
@@ -231,7 +245,7 @@
     if (handlerAddr == NULL) {
       CALL_VM_NOCHECK(InterpreterRuntime::prepare_native_call(thread, method));
       if (HAS_PENDING_EXCEPTION)
-        goto unwind_and_return;
+        goto unlock_unwind_and_return;
 
       handlerAddr = method->signature_handler();
       assert(handlerAddr != NULL, "eh?");
@@ -240,7 +254,7 @@
       CALL_VM_NOCHECK(handlerAddr =
         InterpreterRuntime::slow_signature_handler(thread, method, NULL,NULL));
       if (HAS_PENDING_EXCEPTION)
-        goto unwind_and_return;
+        goto unlock_unwind_and_return;
     }
     handler = \
       InterpreterRuntime::SignatureHandler::from_handlerAddr(handlerAddr);
@@ -351,10 +365,10 @@
   // Reset handle block
   thread->active_handles()->clear();
 
-  // Unlock if necessary.  It seems totally wrong that this
-  // is skipped in the event of an exception but apparently
-  // the template interpreter does this so we do too.
-  if (monitor && !HAS_PENDING_EXCEPTION) {
+ unlock_unwind_and_return:
+
+  // Unlock if necessary
+  if (monitor) {
     BasicLock *lock = monitor->lock();
     markOop header = lock->displaced_header();
     oop rcvr = monitor->obj();
@@ -380,9 +394,10 @@
 
   // Push our result
   if (!HAS_PENDING_EXCEPTION) {
-    stack->set_sp(stack->sp() - type2size[method->result_type()]);
+    BasicType type = result_type_of(method);
+    stack->set_sp(stack->sp() - type2size[type]);
 
-    switch (method->result_type()) {
+    switch (type) {
     case T_VOID:
       break;
 
@@ -693,6 +708,26 @@
   return i;
 }
 
+BasicType CppInterpreter::result_type_of(methodOop method) {
+  BasicType t;
+  switch (method->result_index()) {
+    case 0 : t = T_BOOLEAN; break;
+    case 1 : t = T_CHAR;    break;
+    case 2 : t = T_BYTE;    break;
+    case 3 : t = T_SHORT;   break;
+    case 4 : t = T_INT;     break;
+    case 5 : t = T_LONG;    break;
+    case 6 : t = T_VOID;    break;
+    case 7 : t = T_FLOAT;   break;
+    case 8 : t = T_DOUBLE;  break;
+    case 9 : t = T_OBJECT;  break;
+    default: ShouldNotReachHere();
+  }
+  assert(AbstractInterpreter::BasicType_as_index(t) == method->result_index(),
+         "out of step with AbstractInterpreter::BasicType_as_index");
+  return t;
+}
+
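
Since result_index() is a small dense range, the switch could equally be a table lookup; the switch form buys the ShouldNotReachHere() check on a bad index. The table alternative, for comparison (a local enum stands in for HotSpot's BasicType, with the order assumed from the switch above):

    enum BasicTypeSketch {
      T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT,
      T_LONG, T_VOID, T_FLOAT, T_DOUBLE, T_OBJECT
    };

    static const BasicTypeSketch result_types[] = {
      T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT,
      T_LONG, T_VOID, T_FLOAT, T_DOUBLE, T_OBJECT
    };

    BasicTypeSketch result_type_of_index(int result_index) {
      return result_types[result_index];  // caller assumed to pass 0..9
    }
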
 address InterpreterGenerator::generate_empty_entry() {
   if (!UseFastEmptyMethods)
     return NULL;
--- a/src/cpu/zero/vm/cppInterpreter_zero.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/zero/vm/cppInterpreter_zero.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008 Red Hat, Inc.
+ * Copyright 2007, 2008, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,3 +41,7 @@
  private:
   // Stack overflow checks
   static bool stack_overflow_imminent(JavaThread *thread);
+
+ private:
+  // Fast result type determination
+  static BasicType result_type_of(methodOop method);
--- a/src/cpu/zero/vm/frame_zero.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/zero/vm/frame_zero.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -36,11 +36,8 @@
   return zeroframe()->is_interpreter_frame();
 }
 
-bool frame::is_fake_stub_frame() const {
-  return zeroframe()->is_fake_stub_frame();
-}
-
 frame frame::sender_for_entry_frame(RegisterMap *map) const {
+  assert(zeroframe()->is_entry_frame(), "wrong type of frame");
   assert(map != NULL, "map must be set");
   assert(!entry_frame_is_first(), "next Java fp must be non zero");
   assert(entry_frame_call_wrapper()->anchor()->last_Java_sp() == sender_sp(),
@@ -50,15 +47,10 @@
   return frame(sender_sp(), sp() + 1);
 }
 
-frame frame::sender_for_interpreter_frame(RegisterMap *map) const {
-  return frame(sender_sp(), sp() + 1);
-}
-
-frame frame::sender_for_compiled_frame(RegisterMap *map) const {
-  return frame(sender_sp(), sp() + 1);
-}
-
-frame frame::sender_for_fake_stub_frame(RegisterMap *map) const {
+frame frame::sender_for_nonentry_frame(RegisterMap *map) const {
+  assert(zeroframe()->is_interpreter_frame() ||
+         zeroframe()->is_shark_frame() ||
+         zeroframe()->is_fake_stub_frame(), "wrong type of frame");
   return frame(sender_sp(), sp() + 1);
 }
 
@@ -69,17 +61,8 @@
 
   if (is_entry_frame())
     return sender_for_entry_frame(map);
-
-  if (is_interpreted_frame())
-    return sender_for_interpreter_frame(map);
-
-  if (is_compiled_frame())
-    return sender_for_compiled_frame(map);
-
-  if (is_fake_stub_frame())
-    return sender_for_fake_stub_frame(map);
-
-  ShouldNotReachHere();
+  else
+    return sender_for_nonentry_frame(map);
 }
 
 #ifdef CC_INTERP
--- a/src/cpu/zero/vm/frame_zero.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/zero/vm/frame_zero.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -65,10 +65,7 @@
   }
 
  public:
-  bool is_fake_stub_frame() const;
-
- public:
-  frame sender_for_fake_stub_frame(RegisterMap* map) const;
+  frame sender_for_nonentry_frame(RegisterMap* map) const;
 
  public:
   void zero_print_on_error(int           index,
--- a/src/cpu/zero/vm/globals_zero.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/zero/vm/globals_zero.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008, 2009 Red Hat, Inc.
+ * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,10 +23,8 @@
  *
  */
 
-//
 // Set the default values for platform dependent flags used by the
 // runtime system.  See globals.hpp for details of what they do.
-//
 
 define_pd_global(bool,  ConvertSleepToYield,  true);
 define_pd_global(bool,  ShareVtableStubs,     true);
@@ -37,19 +35,12 @@
 define_pd_global(bool,  UncommonNullCast,     true);
 
 define_pd_global(intx,  CodeEntryAlignment,   32);
-define_pd_global(uintx, TLABSize,             0);
-#ifdef _LP64
-define_pd_global(uintx, NewSize,              ScaleForWordSize(2048 * K));
-#else
-define_pd_global(uintx, NewSize,              ScaleForWordSize(1024 * K));
-#endif // _LP64
 define_pd_global(intx,  InlineFrequencyCount, 100);
-define_pd_global(intx,  InlineSmallCode,      1000);
 define_pd_global(intx,  PreInflateSpin,       10);
 
 define_pd_global(intx,  StackYellowPages,     2);
 define_pd_global(intx,  StackRedPages,        1);
-define_pd_global(intx,  StackShadowPages,     3 LP64_ONLY(+3) DEBUG_ONLY(+3));
+define_pd_global(intx,  StackShadowPages,     5 LP64_ONLY(+1) DEBUG_ONLY(+3));
 
 define_pd_global(bool,  RewriteBytecodes,     true);
 define_pd_global(bool,  RewriteFrequentPairs, true);
--- a/src/cpu/zero/vm/interpreter_zero.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/zero/vm/interpreter_zero.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008 Red Hat, Inc.
+ * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,6 +47,10 @@
   return ShouldNotCallThisEntry();
 }
 
+bool AbstractInterpreter::can_be_compiled(methodHandle m) {
+  return true;
+}
+
 int AbstractInterpreter::size_activation(methodOop method,
                                          int tempcount,
                                          int popframe_extra_args,
--- a/src/cpu/zero/vm/sharedRuntime_zero.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/zero/vm/sharedRuntime_zero.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008 Red Hat, Inc.
+ * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,8 +47,10 @@
                         int total_args_passed,
                         int comp_args_on_stack,
                         const BasicType *sig_bt,
-                        const VMRegPair *regs) {
-  return new AdapterHandlerEntry(
+                        const VMRegPair *regs,
+                        AdapterFingerPrint *fingerprint) {
+  return AdapterHandlerLibrary::new_entry(
+    fingerprint,
     ShouldNotCallThisStub(),
     ShouldNotCallThisStub(),
     ShouldNotCallThisStub());
@@ -61,7 +63,14 @@
                                                 BasicType *in_sig_bt,
                                                 VMRegPair *in_regs,
                                                 BasicType ret_type) {
+#ifdef SHARK
+  return SharkCompiler::compiler()->generate_native_wrapper(masm,
+                                                            method,
+                                                            in_sig_bt,
+                                                            ret_type);
+#else
   ShouldNotCallThis();
+#endif // SHARK
 }
 
 int Deoptimization::last_frame_adjust(int callee_parameters,
--- a/src/cpu/zero/vm/sharkFrame_zero.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/cpu/zero/vm/sharkFrame_zero.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2008 Red Hat, Inc.
+ * Copyright 2008, 2009 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,7 @@
 // |  ...               |
 
 class SharkFrame : public ZeroFrame {
-  friend class SharkFunction;
+  friend class SharkStack;
 
  private:
   SharkFrame() : ZeroFrame() {
--- a/src/os/linux/vm/os_linux.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/os/linux/vm/os_linux.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -223,8 +223,8 @@
                      "environment on Linux when /proc filesystem is not mounted.";
 
 void os::Linux::initialize_system_info() {
-  _processor_count = sysconf(_SC_NPROCESSORS_CONF);
-  if (_processor_count == 1) {
+  set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
+  if (processor_count() == 1) {
     pid_t pid = os::Linux::gettid();
     char fname[32];
     jio_snprintf(fname, sizeof(fname), "/proc/%d", pid);
@@ -236,7 +236,7 @@
     }
   }
   _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
-  assert(_processor_count > 0, "linux error");
+  assert(processor_count() > 0, "linux error");
 }
 
 void os::init_system_properties_values() {
@@ -4683,6 +4683,7 @@
   // Return immediately if a permit is available.
   if (_counter > 0) {
       _counter = 0 ;
+      OrderAccess::fence();
       return ;
   }
 
@@ -4725,6 +4726,7 @@
     _counter = 0;
     status = pthread_mutex_unlock(_mutex);
     assert (status == 0, "invariant") ;
+    OrderAccess::fence();
     return;
   }
 
@@ -4765,6 +4767,7 @@
     jt->java_suspend_self();
   }
 
+  OrderAccess::fence();
 }
 
 void Parker::unpark() {
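
The three fence() calls added above close the same race: park()'s fast path and its wakeup paths clear _counter outside (or while releasing) _mutex, so the store must become globally visible before the caller proceeds or a concurrent unpark() permit can be lost. A minimal sketch of the pairing, using std::atomic as a stand-in for HotSpot's OrderAccess (ToyParker and its members are illustrative names, not VM code):

    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    class ToyParker {
      std::atomic<int> _counter{0};
      std::mutex _mutex;
      std::condition_variable _cond;
     public:
      void park() {
        // Fast path: consume an available permit without taking the lock.
        // The seq_cst exchange provides the full fence the patch adds.
        if (_counter.exchange(0, std::memory_order_seq_cst) > 0)
          return;
        std::unique_lock<std::mutex> lk(_mutex);
        _cond.wait(lk, [this] { return _counter.load() > 0; });
        _counter.store(0);
        // Full fence before returning, as on the patched exit paths.
        std::atomic_thread_fence(std::memory_order_seq_cst);
      }
      void unpark() {
        { std::lock_guard<std::mutex> lk(_mutex); _counter.store(1); }
        _cond.notify_one();
      }
    };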
--- a/src/os/solaris/dtrace/libjvm_db.c	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/os/solaris/dtrace/libjvm_db.c	Wed Mar 24 17:16:33 2010 -0700
@@ -937,54 +937,56 @@
   return err;
 }
 
-static int
-scopeDesc_chain(Nmethod_t *N)
-{
+static int scopeDesc_chain(Nmethod_t *N) {
   int32_t decode_offset = 0;
   int32_t err;
 
-  if (debug > 2)
-      fprintf(stderr, "\t scopeDesc_chain: BEGIN\n");
+  if (debug > 2) {
+    fprintf(stderr, "\t scopeDesc_chain: BEGIN\n");
+  }
 
   err = ps_pread(N->J->P, N->pc_desc + OFFSET_PcDesc_scope_decode_offset,
                  &decode_offset, SZ32);
   CHECK_FAIL(err);
 
   while (decode_offset > 0) {
-      if (debug > 2)
-          fprintf(stderr, "\t scopeDesc_chain: decode_offset: %#x\n", decode_offset);
+    Vframe_t *vf = &N->vframes[N->vf_cnt];
 
-      Vframe_t *vf = &N->vframes[N->vf_cnt];
+    if (debug > 2) {
+      fprintf(stderr, "\t scopeDesc_chain: decode_offset: %#x\n", decode_offset);
+    }
+
+    err = scope_desc_at(N, decode_offset, vf);
+    CHECK_FAIL(err);
 
-      err = scope_desc_at(N, decode_offset, vf);
-      CHECK_FAIL(err);
+    if (vf->methodIdx > N->oops_len) {
+      fprintf(stderr, "\t scopeDesc_chain: (methodIdx > oops_len) !\n");
+      return -1;
+    }
+    err = read_pointer(N->J, N->nm + N->oops_beg + (vf->methodIdx-1)*POINTER_SIZE,
+                       &vf->methodOop);
+    CHECK_FAIL(err);
 
-      if (vf->methodIdx > N->oops_len) {
-          fprintf(stderr, "\t scopeDesc_chain: (methodIdx > oops_len) !\n");
-          return -1;
-      }
-      err = read_pointer(N->J, N->nm + N->oops_beg + (vf->methodIdx-1)*POINTER_SIZE,
-                               &vf->methodOop);
+    if (vf->methodOop) {
+      N->vf_cnt++;
+      err = line_number_from_bci(N->J, vf);
       CHECK_FAIL(err);
-
-      if (vf->methodOop) {
-          N->vf_cnt++;
-          err = line_number_from_bci(N->J, vf);
-          CHECK_FAIL(err);
-          if (debug > 2) {
-              fprintf(stderr, "\t scopeDesc_chain: methodOop: %#8llx, line: %ld\n",
-                              vf->methodOop, vf->line);
-          }
+      if (debug > 2) {
+        fprintf(stderr, "\t scopeDesc_chain: methodOop: %#8llx, line: %ld\n",
+                vf->methodOop, vf->line);
       }
-      decode_offset = vf->sender_decode_offset;
+    }
+    decode_offset = vf->sender_decode_offset;
   }
-  if (debug > 2)
-      fprintf(stderr, "\t scopeDesc_chain: END \n\n");
+  if (debug > 2) {
+    fprintf(stderr, "\t scopeDesc_chain: END \n\n");
+  }
   return PS_OK;
 
  fail:
-  if (debug)
-      fprintf(stderr, "\t scopeDesc_chain: FAIL \n\n");
+  if (debug) {
+    fprintf(stderr, "\t scopeDesc_chain: FAIL \n\n");
+  }
   return err;
 }
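
Beyond the brace restyling, the loop's contract is compact: each scope descriptor records the decode offset of its sender, and a zero offset terminates the inlining chain. A condensed restatement (types and the callback are simplified stand-ins for the libjvm_db ones):

    struct Vframe { long method_oop; int sender_decode_offset; };

    // Walk scopes from the innermost vframe outward; decode_scope_at is a
    // stand-in for scope_desc_at() and returns false on a read failure.
    static bool walk_scope_chain(int decode_offset,
                                 bool (*decode_scope_at)(int, Vframe*)) {
      while (decode_offset > 0) {
        Vframe vf;
        if (!decode_scope_at(decode_offset, &vf))
          return false;                           // CHECK_FAIL equivalent
        decode_offset = vf.sender_decode_offset;  // 0 ends the chain
      }
      return true;
    }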
 
--- a/src/os/solaris/vm/os_solaris.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/os/solaris/vm/os_solaris.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -457,7 +457,7 @@
 
 
 void os::Solaris::initialize_system_info() {
-  _processor_count = sysconf(_SC_NPROCESSORS_CONF);
+  set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
   _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
   _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
 }
@@ -5803,6 +5803,7 @@
   // Return immediately if a permit is available.
   if (_counter > 0) {
       _counter = 0 ;
+      OrderAccess::fence();
       return ;
   }
 
@@ -5846,6 +5847,7 @@
     _counter = 0;
     status = os::Solaris::mutex_unlock(_mutex);
     assert (status == 0, "invariant") ;
+    OrderAccess::fence();
     return;
   }
 
@@ -5892,6 +5894,7 @@
     jt->java_suspend_self();
   }
 
+  OrderAccess::fence();
 }
 
 void Parker::unpark() {
--- a/src/os/windows/vm/os_windows.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/os/windows/vm/os_windows.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -142,6 +142,9 @@
 }
 
 #ifndef _WIN64
+// previous UnhandledExceptionFilter, if there is one
+static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
+
 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
 #endif
 void os::init_system_properties_values() {
@@ -260,7 +263,8 @@
   }
 
 #ifndef _WIN64
-  SetUnhandledExceptionFilter(Handle_FLT_Exception);
+  // set our UnhandledExceptionFilter and save any previous one
+  prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
 #endif
 
   // Done
@@ -1969,7 +1973,7 @@
 #ifndef  _WIN64
 //-----------------------------------------------------------------------------
 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
-  // handle exception caused by native mothod modifying control word
+  // handle exception caused by native method modifying control word
   PCONTEXT ctx = exceptionInfo->ContextRecord;
   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
 
@@ -1990,6 +1994,13 @@
         return EXCEPTION_CONTINUE_EXECUTION;
       }
   }
+
+  if (prev_uef_handler != NULL) {
+    // We didn't handle this exception so pass it to the previous
+    // UnhandledExceptionFilter.
+    return (prev_uef_handler)(exceptionInfo);
+  }
+
   return EXCEPTION_CONTINUE_SEARCH;
 }
 #else //_WIN64
@@ -3150,7 +3161,7 @@
   _vm_allocation_granularity = si.dwAllocationGranularity;
   _processor_type  = si.dwProcessorType;
   _processor_level = si.wProcessorLevel;
-  _processor_count = si.dwNumberOfProcessors;
+  set_processor_count(si.dwNumberOfProcessors);
 
   MEMORYSTATUSEX ms;
   ms.dwLength = sizeof(ms);
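
SetUnhandledExceptionFilter replaces the process-wide filter and returns the previous one, so a well-behaved handler saves that pointer and forwards anything it does not recognize, which is exactly what the prev_uef_handler change does. A minimal sketch of the pattern (Windows-only; the handler body is elided and all names here are illustrative):

    #include <windows.h>

    static LPTOP_LEVEL_EXCEPTION_FILTER g_prev_filter = NULL;

    static LONG WINAPI my_filter(EXCEPTION_POINTERS* info) {
      // ... handle the cases we care about, returning
      // EXCEPTION_CONTINUE_EXECUTION once fixed up ...
      if (g_prev_filter != NULL)
        return g_prev_filter(info);      // chain to whoever was installed first
      return EXCEPTION_CONTINUE_SEARCH;  // default: keep searching
    }

    void install_filter() {
      g_prev_filter = SetUnhandledExceptionFilter(my_filter);
    }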
--- a/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -22,10 +22,9 @@
  *
  */
 
-//
 // Sets the default values for platform dependent flags used by the runtime system.
 // (see globals.hpp)
-//
+
 define_pd_global(bool, DontYieldALot,            false);
 #ifdef AMD64
 define_pd_global(intx, ThreadStackSize,          1024); // 0 => use system default
@@ -39,11 +38,10 @@
 #endif // AMD64
 
 define_pd_global(intx, CompilerThreadStackSize,  0);
-define_pd_global(intx, SurvivorRatio,            8);
 
-define_pd_global(uintx, JVMInvokeMethodSlack,    8192);
+define_pd_global(uintx,JVMInvokeMethodSlack,     8192);
 
 // Only used on 64 bit platforms
-define_pd_global(uintx, HeapBaseMinAddress,      2*G);
+define_pd_global(uintx,HeapBaseMinAddress,       2*G);
 // Only used on 64 bit Windows platforms
 define_pd_global(bool, UseVectoredExceptions,    false);
--- a/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008 Red Hat, Inc.
+ * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -239,7 +239,21 @@
 }
 
 bool os::is_allocatable(size_t bytes) {
-  ShouldNotCallThis();
+#ifdef _LP64
+  return true;
+#else
+  if (bytes < 2 * G) {
+    return true;
+  }
+
+  char* addr = reserve_memory(bytes, NULL);
+
+  if (addr != NULL) {
+    release_memory(addr, bytes);
+  }
+
+  return addr != NULL;
+#endif // _LP64
 }
 
 ///////////////////////////////////////////////////////////////////////////////
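
On 32-bit targets the new is_allocatable answers the question empirically: requests under 2 GB are assumed fine, and anything larger is probed by reserving and immediately releasing the range. A standalone sketch of the same probe using mmap directly (the VM goes through os::reserve_memory/os::release_memory instead; Linux-only):

    #include <sys/mman.h>
    #include <cstddef>

    // Probe whether a contiguous reservation of `bytes` can succeed.
    static bool can_reserve(size_t bytes) {
      void* addr = mmap(nullptr, bytes, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (addr == MAP_FAILED) return false;
      munmap(addr, bytes);  // release immediately; we only wanted the answer
      return true;
    }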
--- a/src/os_cpu/linux_zero/vm/os_linux_zero.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/os_cpu/linux_zero/vm/os_linux_zero.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2004 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008 Red Hat, Inc.
+ * Copyright 2007, 2008, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,13 @@
                   "stfd %0, 0(%2)\n"
                   : "=f"(tmp)
                   : "b"(src), "b"(dst));
+#elif defined(S390) && !defined(_LP64)
+    double tmp;
+    asm volatile ("ld  %0, 0(%1)\n"
+                  "std %0, 0(%2)\n"
+                  : "=r"(tmp)
+                  : "a"(src), "a"(dst));
 #else
     *(jlong *) dst = *(jlong *) src;
-#endif // PPC && !_LP64
+#endif
   }
--- a/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -22,31 +22,25 @@
  *
  */
 
-//
 // Sets the default values for platform dependent flags used by the runtime system.
 // (see globals.hpp)
-//
+
 define_pd_global(bool, DontYieldALot,            true); // Determined in the design center
 #ifdef AMD64
 define_pd_global(intx, ThreadStackSize,          1024); // 0 => use system default
 define_pd_global(intx, VMThreadStackSize,        1024);
-define_pd_global(intx, SurvivorRatio,            6);
-define_pd_global(uintx, JVMInvokeMethodSlack,    8*K);
+define_pd_global(uintx,JVMInvokeMethodSlack,     8*K);
 #else
-//  UseStackBanging is not pd
-// define_pd_global(bool, UseStackBanging,          true);
-
 // ThreadStackSize 320 allows TaggedStackInterpreter and a couple of test cases
 // to run while keeping the number of threads that can be created high.
 define_pd_global(intx, ThreadStackSize,          320);
 define_pd_global(intx, VMThreadStackSize,        512);
-define_pd_global(intx, SurvivorRatio,            8);
-define_pd_global(uintx, JVMInvokeMethodSlack,    10*K);
+define_pd_global(uintx,JVMInvokeMethodSlack,     10*K);
 #endif // AMD64
 
 define_pd_global(intx, CompilerThreadStackSize,  0);
 
 // Only used on 64 bit platforms
-define_pd_global(uintx, HeapBaseMinAddress,      256*M);
+define_pd_global(uintx,HeapBaseMinAddress,       256*M);
 // Only used on 64 bit Windows platforms
 define_pd_global(bool, UseVectoredExceptions,    false);
--- a/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -730,11 +730,12 @@
   st->print(", RSI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSI]);
   st->print(", RDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDI]);
   st->cr();
-  st->print(", R8=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R8]);
+  st->print(  "R8=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R8]);
   st->print(", R9=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R9]);
   st->print(", R10=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R10]);
   st->print(", R11=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R11]);
-  st->print(", R12=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R12]);
+  st->cr();
+  st->print(  "R12=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R12]);
   st->print(", R13=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R13]);
   st->print(", R14=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R14]);
   st->print(", R15=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R15]);
--- a/src/os_cpu/windows_x86/vm/globals_windows_x86.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/os_cpu/windows_x86/vm/globals_windows_x86.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -22,10 +22,9 @@
  *
  */
 
-//
 // Sets the default values for platform dependent flags used by the runtime system.
 // (see globals.hpp)
-//
+
 define_pd_global(bool, DontYieldALot,            false);
 
 // Default stack size on Windows is determined by the executable (java.exe
@@ -35,8 +34,6 @@
 define_pd_global(intx, ThreadStackSize,          0); // 0 => use system default
 define_pd_global(intx, VMThreadStackSize,        0); // 0 => use system default
 
-define_pd_global(intx, SurvivorRatio,            8);
-
 #ifdef ASSERT
 define_pd_global(intx, CompilerThreadStackSize,  1024);
 #else
--- a/src/share/vm/adlc/output_c.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/adlc/output_c.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1496,7 +1496,7 @@
   unsigned      i;
 
   // Generate Expand function header
-  fprintf(fp,"MachNode *%sNode::Expand(State *state, Node_List &proj_list) {\n", node->_ident);
+  fprintf(fp,"MachNode *%sNode::Expand(State *state, Node_List &proj_list, Node* mem) {\n", node->_ident);
   fprintf(fp,"Compile* C = Compile::current();\n");
   // Generate expand code
   if( node->expands() ) {
@@ -1546,15 +1546,16 @@
     // Build a mapping from operand index to input edges
     fprintf(fp,"  unsigned idx0 = oper_input_base();\n");
 
-    // The order in which inputs are added to a node is very
+    // The order in which the memory input is added to a node is very
     // strange.  Store nodes get a memory input before Expand is
-    // called and all other nodes get it afterwards so
-    // oper_input_base is wrong during expansion.  This code adjusts
-    // is so that expansion will work correctly.
-    bool missing_memory_edge = node->_matrule->needs_ideal_memory_edge(_globalNames) &&
-                               node->is_ideal_store() == Form::none;
-    if (missing_memory_edge) {
-      fprintf(fp,"  idx0--; // Adjust base because memory edge hasn't been inserted yet\n");
+    // called and other nodes get it afterwards or before depending on
+    // match order so oper_input_base is wrong during expansion.  This
+    // code adjusts it so that expansion will work correctly.
+    int has_memory_edge = node->_matrule->needs_ideal_memory_edge(_globalNames);
+    if (has_memory_edge) {
+      fprintf(fp,"  if (mem == (Node*)1) {\n");
+      fprintf(fp,"    idx0--; // Adjust base because memory edge hasn't been inserted yet\n");
+      fprintf(fp,"  }\n");
     }
 
     for( i = 0; i < node->num_opnds(); i++ ) {
@@ -1611,9 +1612,11 @@
         int node_mem_op = node->memory_operand(_globalNames);
         assert( node_mem_op != InstructForm::NO_MEMORY_OPERAND,
                 "expand rule member needs memory but top-level inst doesn't have any" );
-        if (!missing_memory_edge) {
+        if (has_memory_edge) {
           // Copy memory edge
-          fprintf(fp,"  n%d->add_req(_in[1]);\t// Add memory edge\n", cnt);
+          fprintf(fp,"  if (mem != (Node*)1) {\n");
+          fprintf(fp,"    n%d->add_req(_in[1]);\t// Add memory edge\n", cnt);
+          fprintf(fp,"  }\n");
         }
       }
 
@@ -1689,7 +1692,7 @@
       } // done iterating over a new instruction's operands
 
       // Invoke Expand() for the newly created instruction.
-      fprintf(fp,"  result = n%d->Expand( state, proj_list );\n", cnt);
+      fprintf(fp,"  result = n%d->Expand( state, proj_list, mem );\n", cnt);
       assert( !new_inst->expands(), "Do not have complete support for recursive expansion");
     } // done iterating over new instructions
     fprintf(fp,"\n");
--- a/src/share/vm/adlc/output_h.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/adlc/output_h.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1754,7 +1754,7 @@
         instr->has_temps() ||
         instr->_matrule != NULL &&
         instr->num_opnds() != instr->num_unique_opnds() ) {
-      fprintf(fp,"  virtual MachNode      *Expand(State *state, Node_List &proj_list);\n");
+      fprintf(fp,"  virtual MachNode      *Expand(State *state, Node_List &proj_list, Node* mem);\n");
     }
 
     if (instr->is_pinned(_globalNames)) {
--- a/src/share/vm/asm/codeBuffer.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/asm/codeBuffer.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,6 +39,7 @@
                  Dtrace_trap = OSR_Entry,  // dtrace probes can never have an OSR entry so reuse it
                  Exceptions,     // Offset where exception handler lives
                  Deopt,          // Offset where deopt handler lives
+                 DeoptMH,        // Offset where MethodHandle deopt handler lives
                  max_Entries };
 
   // special value to note codeBlobs where profile (forte) stack walking is
@@ -51,12 +52,13 @@
 
 public:
   CodeOffsets() {
-    _values[Entry] = 0;
+    _values[Entry         ] = 0;
     _values[Verified_Entry] = 0;
     _values[Frame_Complete] = frame_never_safe;
-    _values[OSR_Entry] = 0;
-    _values[Exceptions] = -1;
-    _values[Deopt] = -1;
+    _values[OSR_Entry     ] = 0;
+    _values[Exceptions    ] = -1;
+    _values[Deopt         ] = -1;
+    _values[DeoptMH       ] = -1;
   }
 
   int value(Entries e) { return _values[e]; }
--- a/src/share/vm/c1/c1_Compilation.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/c1/c1_Compilation.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -205,6 +205,8 @@
 void Compilation::emit_code_epilog(LIR_Assembler* assembler) {
   CHECK_BAILOUT();
 
+  CodeOffsets* code_offsets = assembler->offsets();
+
   // generate code or slow cases
   assembler->emit_slow_case_stubs();
   CHECK_BAILOUT();
@@ -213,10 +215,18 @@
   assembler->emit_exception_entries(exception_info_list());
   CHECK_BAILOUT();
 
-  // generate code for exception handler
-  assembler->emit_exception_handler();
+  // Generate code for exception handler.
+  code_offsets->set_value(CodeOffsets::Exceptions, assembler->emit_exception_handler());
   CHECK_BAILOUT();
-  assembler->emit_deopt_handler();
+
+  // Generate code for deopt handler.
+  code_offsets->set_value(CodeOffsets::Deopt, assembler->emit_deopt_handler());
+  CHECK_BAILOUT();
+
+  // Generate code for MethodHandle deopt handler.  We can use the
+  // same code as for the normal deopt handler; we just need a
+  // different entry point address.
+  code_offsets->set_value(CodeOffsets::DeoptMH, assembler->emit_deopt_handler());
   CHECK_BAILOUT();
 
   // done
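
The epilog now records where each handler begins: emit_exception_handler() and emit_deopt_handler() were changed (see c1_LIRAssembler.hpp below) to return the code offset at which they emitted, and those offsets land in the CodeOffsets table under the corresponding entries. A toy version of the pattern, assuming emitters that append code and return its start offset:

    // Minimal sketch: record handler entry offsets as they are emitted.
    enum Entries { Exceptions, Deopt, DeoptMH, max_Entries };

    struct CodeOffsetsSketch {
      int _values[max_Entries];
      CodeOffsetsSketch() { for (int i = 0; i < max_Entries; i++) _values[i] = -1; }
      void set_value(Entries e, int off) { _values[e] = off; }
      int  value(Entries e) const       { return _values[e]; }
    };

    void emit_epilog(CodeOffsetsSketch* offs,
                     int (*emit_exception)(), int (*emit_deopt)()) {
      offs->set_value(Exceptions, emit_exception());
      offs->set_value(Deopt,      emit_deopt());
      offs->set_value(DeoptMH,    emit_deopt());  // same code, distinct entry PC
    }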
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -365,7 +365,7 @@
     if (_next_loop_index < 31) _next_loop_index++;
   } else {
     // block already marked as loop header
-    assert(is_power_of_2(_loop_map.at(block->block_id())), "exactly one bit must be set");
+    assert(is_power_of_2((unsigned int)_loop_map.at(block->block_id())), "exactly one bit must be set");
   }
 }
 
--- a/src/share/vm/c1/c1_IR.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/c1/c1_IR.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -251,8 +251,10 @@
     DebugToken* expvals = recorder->create_scope_values(expressions());
     DebugToken* monvals = recorder->create_monitor_values(monitors());
     // reexecute allowed only for the topmost frame
-    bool      reexecute = topmost ? should_reexecute() : false;
-    recorder->describe_scope(pc_offset, scope()->method(), bci(), reexecute, locvals, expvals, monvals);
+    bool reexecute = topmost ? should_reexecute() : false;
+    bool is_method_handle_invoke = false;
+    bool return_oop = false; // This flag will be ignored since it is used only for C2 with escape analysis.
+    recorder->describe_scope(pc_offset, scope()->method(), bci(), reexecute, is_method_handle_invoke, return_oop, locvals, expvals, monvals);
   }
 };
 
--- a/src/share/vm/c1/c1_LIR.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/c1/c1_LIR.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2000,7 +2000,7 @@
   typedef enum { inputMode, firstMode = inputMode, tempMode, outputMode, numModes, invalidMode = -1 } OprMode;
 
   enum {
-    maxNumberOfOperands = 14,
+    maxNumberOfOperands = 16,
     maxNumberOfInfos = 4
   };
 
--- a/src/share/vm/c1/c1_LIRAssembler.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/c1/c1_LIRAssembler.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -133,9 +133,9 @@
   void add_call_info_here(CodeEmitInfo* info)                              { add_call_info(code_offset(), info); }
 
   // code patterns
-  void emit_exception_handler();
+  int  emit_exception_handler();
   void emit_exception_entries(ExceptionInfoList* info_list);
-  void emit_deopt_handler();
+  int  emit_deopt_handler();
 
   void emit_code(BlockList* hir);
   void emit_block(BlockBegin* block);
--- a/src/share/vm/c1/c1_LIRGenerator.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2005-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1765,7 +1765,7 @@
     __ null_check(exception_opr, new CodeEmitInfo(info, true));
   }
 
-  if (compilation()->env()->jvmti_can_post_exceptions() &&
+  if (compilation()->env()->jvmti_can_post_on_exceptions() &&
       !block()->is_set(BlockBegin::default_exception_handler_flag)) {
     // we need to go through the exception lookup path to get JVMTI
     // notification done
@@ -1855,12 +1855,26 @@
     addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
   } else {
 #ifdef X86
+#ifdef _LP64
+    if (!index_op->is_illegal() && index_op->type() == T_INT) {
+      LIR_Opr tmp = new_pointer_register();
+      __ convert(Bytecodes::_i2l, index_op, tmp);
+      index_op = tmp;
+    }
+#endif
     addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
 #else
     if (index_op->is_illegal() || log2_scale == 0) {
+#ifdef _LP64
+      if (!index_op->is_illegal() && index_op->type() == T_INT) {
+        LIR_Opr tmp = new_pointer_register();
+        __ convert(Bytecodes::_i2l, index_op, tmp);
+        index_op = tmp;
+      }
+#endif
       addr = new LIR_Address(base_op, index_op, dst_type);
     } else {
-      LIR_Opr tmp = new_register(T_INT);
+      LIR_Opr tmp = new_pointer_register();
       __ shift_left(index_op, log2_scale, tmp);
       addr = new LIR_Address(base_op, tmp, dst_type);
     }
@@ -1915,10 +1929,25 @@
   LIR_Opr index_op = idx.result();
   if (log2_scale != 0) {
     // temporary fix (platform dependent code without shift on Intel would be better)
-    index_op = new_register(T_INT);
-    __ move(idx.result(), index_op);
+    index_op = new_pointer_register();
+#ifdef _LP64
+    if(idx.result()->type() == T_INT) {
+      __ convert(Bytecodes::_i2l, idx.result(), index_op);
+    } else {
+#endif
+      __ move(idx.result(), index_op);
+#ifdef _LP64
+    }
+#endif
     __ shift_left(index_op, log2_scale, index_op);
   }
+#ifdef _LP64
+  else if(!index_op->is_illegal() && index_op->type() == T_INT) {
+    LIR_Opr tmp = new_pointer_register();
+    __ convert(Bytecodes::_i2l, index_op, tmp);
+    index_op = tmp;
+  }
+#endif
 
   LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
   __ move(value.result(), addr);
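
The repeated _LP64 blocks all guard the same hazard: on 64-bit, an address is formed from a 64-bit base plus an index register, so a T_INT index must be widened with an explicit i2l into a pointer-sized register first; reusing the 32-bit value as raw register bits loses the sign extension a negative index needs. The arithmetic in plain C++:

    #include <cstdint>
    #include <cassert>

    int main() {
      int32_t index = -8;       // a negative T_INT index
      int log2_scale = 3;

      // With the explicit i2l: sign-extend to 64 bits, then scale.
      int64_t good = static_cast<int64_t>(index) << log2_scale;    // -64

      // Without it: the low 32 register bits, zero-extended into an address.
      uint64_t bad = static_cast<uint32_t>(index << log2_scale);   // 0xFFFFFFC0

      assert(good == -64);
      assert(bad == 0xFFFFFFC0u);  // base + bad lands ~4 GB away from base - 64
      return 0;
    }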
--- a/src/share/vm/c1/c1_LinearScan.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/c1/c1_LinearScan.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -2464,6 +2464,10 @@
 
     case T_LONG: // fall through
     case T_DOUBLE: {
+#ifdef _LP64
+      scope_values->append(&_int_0_scope_value);
+      scope_values->append(new ConstantLongValue(c->as_jlong_bits()));
+#else
       if (hi_word_offset_in_bytes > lo_word_offset_in_bytes) {
         scope_values->append(new ConstantIntValue(c->as_jint_hi_bits()));
         scope_values->append(new ConstantIntValue(c->as_jint_lo_bits()));
@@ -2471,7 +2475,7 @@
         scope_values->append(new ConstantIntValue(c->as_jint_lo_bits()));
         scope_values->append(new ConstantIntValue(c->as_jint_hi_bits()));
       }
-
+#endif
       return 2;
     }
 
@@ -2503,17 +2507,18 @@
   } else if (opr->is_single_cpu()) {
     bool is_oop = opr->is_oop_register();
     int cache_idx = opr->cpu_regnr() * 2 + (is_oop ? 1 : 0);
+    Location::Type int_loc_type = NOT_LP64(Location::normal) LP64_ONLY(Location::int_in_long);
 
     ScopeValue* sv = _scope_value_cache.at(cache_idx);
     if (sv == NULL) {
-      Location::Type loc_type = is_oop ? Location::oop : Location::normal;
+      Location::Type loc_type = is_oop ? Location::oop : int_loc_type;
       VMReg rname = frame_map()->regname(opr);
       sv = new LocationValue(Location::new_reg_loc(loc_type, rname));
       _scope_value_cache.at_put(cache_idx, sv);
     }
 
     // check if cached value is correct
-    DEBUG_ONLY(assert_equal(sv, new LocationValue(Location::new_reg_loc(is_oop ? Location::oop : Location::normal, frame_map()->regname(opr)))));
+    DEBUG_ONLY(assert_equal(sv, new LocationValue(Location::new_reg_loc(is_oop ? Location::oop : int_loc_type, frame_map()->regname(opr)))));
 
     scope_values->append(sv);
     return 1;
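
Both hunks preserve an invariant of the debug-info encoding: a T_LONG or T_DOUBLE value always contributes exactly two scope-value slots. The 32-bit path fills them with the hi/lo words in platform order; the LP64 path keeps the count at two with a dummy int 0 followed by a single ConstantLongValue, and int registers are tagged Location::int_in_long so consumers know the value lives in the low half of a 64-bit register. The word split in isolation:

    #include <cstdint>
    #include <cstdio>

    // Split a jlong constant into hi/lo 32-bit words, as the 32-bit
    // path's as_jint_hi_bits()/as_jint_lo_bits() do.
    static void split_jlong(int64_t v, int32_t* hi, int32_t* lo) {
      *hi = static_cast<int32_t>(v >> 32);
      *lo = static_cast<int32_t>(v & 0xFFFFFFFFLL);
    }

    int main() {
      int32_t hi, lo;
      split_jlong(0x0000000100000002LL, &hi, &lo);
      std::printf("hi=%d lo=%d\n", hi, lo);  // prints: hi=1 lo=2
      return 0;
    }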
--- a/src/share/vm/c1/c1_Runtime1.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/c1/c1_Runtime1.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -110,8 +110,8 @@
     RegisterMap reg_map(thread, false);
     frame runtime_frame = thread->last_frame();
     frame caller_frame = runtime_frame.sender(&reg_map);
-    VM_DeoptimizeFrame deopt(thread, caller_frame.id());
-    VMThread::execute(&deopt);
+    // bypass VM_DeoptimizeFrame and deoptimize the frame directly
+    Deoptimization::deoptimize_frame(thread, caller_frame.id());
     assert(caller_is_deopted(), "Must be deoptimized");
   }
 }
@@ -354,7 +354,7 @@
 
 
 JRT_ENTRY(void, Runtime1::post_jvmti_exception_throw(JavaThread* thread))
-  if (JvmtiExport::can_post_exceptions()) {
+  if (JvmtiExport::can_post_on_exceptions()) {
     vframeStream vfst(thread, true);
     address bcp = vfst.method()->bcp_from(vfst.bci());
     JvmtiExport::post_exception_throw(thread, vfst.method(), bcp, thread->exception_oop());
@@ -425,7 +425,7 @@
   assert(exception.not_null(), "NULL exceptions should be handled by throw_exception");
   assert(exception->is_oop(), "just checking");
   // Check that exception is a subclass of Throwable, otherwise we have a VerifyError
-  if (!(exception->is_a(SystemDictionary::throwable_klass()))) {
+  if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
     if (ExitVMOnVerifyError) vm_exit(-1);
     ShouldNotReachHere();
   }
@@ -437,7 +437,7 @@
   bool guard_pages_enabled = thread->stack_yellow_zone_enabled();
   if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
 
-  if (JvmtiExport::can_post_exceptions()) {
+  if (JvmtiExport::can_post_on_exceptions()) {
     // To ensure correct notification of exception catches and throws
     // we have to deoptimize here.  If we attempted to notify the
     // catches and throws during this exception lookup it's possible
@@ -1075,6 +1075,7 @@
 };
 
 
+// Below, "length" is the number of elements copied.
 template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
                                           oopDesc* dst, T* dst_addr,
                                           int length) {
@@ -1083,22 +1084,22 @@
   // barrier. The assert will fail if this is not the case.
   // Note that we use the non-virtual inlineable variant of write_ref_array.
   BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->has_write_ref_array_opt(),
-         "Barrier set must have ref array opt");
+  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
+  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
   if (src == dst) {
     // same object, no check
+    bs->write_ref_array_pre(dst_addr, length);
     Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
-    bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
-                                  (HeapWord*)(dst_addr + length)));
+    bs->write_ref_array((HeapWord*)dst_addr, length);
     return ac_ok;
   } else {
     klassOop bound = objArrayKlass::cast(dst->klass())->element_klass();
     klassOop stype = objArrayKlass::cast(src->klass())->element_klass();
     if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) {
       // Elements are guaranteed to be subtypes, so no check necessary
+      bs->write_ref_array_pre(dst_addr, length);
       Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
-      bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
-                                    (HeapWord*)(dst_addr + length)));
+      bs->write_ref_array((HeapWord*)dst_addr, length);
       return ac_ok;
     }
   }
@@ -1162,9 +1163,16 @@
 #endif
 
   if (num == 0) return;
+  BarrierSet* bs = Universe::heap()->barrier_set();
+  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
+  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
+  if (UseCompressedOops) {
+    bs->write_ref_array_pre((narrowOop*)dst, num);
+  } else {
+    bs->write_ref_array_pre((oop*)dst, num);
+  }
   Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);
-  BarrierSet* bs = Universe::heap()->barrier_set();
-  bs->write_ref_array(MemRegion(dst, dst + num));
+  bs->write_ref_array(dst, num);
 JRT_END
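
Both copy paths now bracket the copy with the barrier set's array operations: a pre-barrier over the destination slots before any stores (required by collectors such as G1, whose snapshot-at-the-beginning marking needs the overwritten referents) and the post-barrier afterward, which now takes an (address, count) pair instead of a MemRegion. Schematically, under the assumption of a BarrierSet-like interface:

    // Sketch of the protocol assumed above; BarrierSetLike stands in for
    // HotSpot's BarrierSet and T for oop*/narrowOop*.
    template <class BarrierSetLike, class T>
    void copy_oop_array(BarrierSetLike* bs, T* src, T* dst, int length) {
      bs->write_ref_array_pre(dst, length);   // snapshot old referents (SATB)
      for (int i = 0; i < length; i++)        // stands in for conjoint_oops_atomic
        dst[i] = src[i];
      bs->write_ref_array(dst, length);       // post-barrier: dirty the cards
    }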
 
 
--- a/src/share/vm/ci/bcEscapeAnalyzer.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/ci/bcEscapeAnalyzer.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -61,9 +61,11 @@
   BCEscapeAnalyzer* _parent;
   int               _level;
 
+ public:
   class  ArgumentMap;
   class  StateInfo;
 
+ private:
   // helper functions
   bool is_argument(int i)    { return i >= 0 && i < _arg_size; }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/ci/ciCPCache.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_ciCPCache.cpp.incl"
+
+// ciCPCache
+
+// ------------------------------------------------------------------
+// ciCPCache::get_f1_offset
+size_t ciCPCache::get_f1_offset(int index) {
+  // Calculate the offset from the constantPoolCacheOop to the f1
+  // field.
+  ByteSize f1_offset =
+    constantPoolCacheOopDesc::entry_offset(index) +
+    ConstantPoolCacheEntry::f1_offset();
+
+  return in_bytes(f1_offset);
+}
+
+
+// ------------------------------------------------------------------
+// ciCPCache::print
+//
+// Print debugging information about the cache.
+void ciCPCache::print() {
+  Unimplemented();
+}
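
get_f1_offset lets the compiler compute, at compile time, the byte offset of entry index's f1 word inside the constant pool cache oop, so generated code can load the linked CallSite with a single memory access. The address arithmetic it enables, in outline (the layout below is a hypothetical mirror of entry_offset(index) + f1_offset(), not HotSpot's real one):

    #include <cstddef>
    #include <cstdint>

    // Hypothetical layout: a header followed by fixed-size entries
    // whose first word is f1.
    struct CacheEntry { void* f1; void* f2; intptr_t flags; };
    struct CPCacheSketch { intptr_t header[2]; CacheEntry entries[1]; };

    static size_t f1_offset(int index) {
      return offsetof(CPCacheSketch, entries)
           + index * sizeof(CacheEntry)
           + offsetof(CacheEntry, f1);
    }

    // Generated code would then perform, in effect:
    //   *(void**)((char*)cpcache_oop + f1_offset(i))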
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/ci/ciCPCache.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// ciCPCache
+//
+// This class represents a constant pool cache.
+//
+// Note: This class is called ciCPCache as ciConstantPoolCache is used
+// for something different.
+class ciCPCache : public ciObject {
+public:
+  ciCPCache(constantPoolCacheHandle cpcache) : ciObject(cpcache) {}
+
+  // What kind of ciObject is this?
+  bool is_cpcache() const { return true; }
+
+  // Get the offset in bytes from the oop to the f1 field of the
+  // requested entry.
+  size_t get_f1_offset(int index);
+
+  void print();
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/ci/ciCallSite.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_ciCallSite.cpp.incl"
+
+// ciCallSite
+
+// ------------------------------------------------------------------
+// ciCallSite::get_target
+//
+// Return the target MethodHandle of this CallSite.
+ciMethodHandle* ciCallSite::get_target() const {
+  VM_ENTRY_MARK;
+  oop method_handle_oop = java_dyn_CallSite::target(get_oop());
+  return CURRENT_ENV->get_object(method_handle_oop)->as_method_handle();
+}
+
+// ------------------------------------------------------------------
+// ciCallSite::print
+//
+// Print debugging information about the CallSite.
+void ciCallSite::print() {
+  Unimplemented();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/ci/ciCallSite.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// ciCallSite
+//
+// The class represents a java.dyn.CallSite object.
+class ciCallSite : public ciInstance {
+public:
+  ciCallSite(instanceHandle h_i) : ciInstance(h_i) {}
+
+  // What kind of ciObject is this?
+  bool is_call_site() const { return true; }
+
+  // Return the target MethodHandle of this CallSite.
+  ciMethodHandle* get_target() const;
+
+  void print();
+};
--- a/src/share/vm/ci/ciClassList.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/ci/ciClassList.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -25,6 +25,7 @@
 class ciEnv;
 class ciObjectFactory;
 class ciConstantPoolCache;
+class ciCPCache;
 
 class ciField;
 class ciConstant;
@@ -42,6 +43,8 @@
 class ciObject;
 class   ciNullObject;
 class   ciInstance;
+class     ciCallSite;
+class     ciMethodHandle;
 class   ciMethod;
 class   ciMethodData;
 class     ciReceiverTypeData;  // part of ciMethodData
@@ -78,6 +81,7 @@
 // Any more access must be given explicitly.
 #define CI_PACKAGE_ACCESS_TO           \
 friend class ciObjectFactory;          \
+friend class ciCallSite;               \
 friend class ciConstantPoolCache;      \
 friend class ciField;                  \
 friend class ciConstant;               \
@@ -93,6 +97,7 @@
 friend class ciInstance;               \
 friend class ciMethod;                 \
 friend class ciMethodData;             \
+friend class ciMethodHandle;           \
 friend class ciReceiverTypeData;       \
 friend class ciSymbol;                 \
 friend class ciArray;                  \
--- a/src/share/vm/ci/ciEnv.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/ci/ciEnv.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,14 +38,9 @@
 ciTypeArrayKlassKlass* ciEnv::_type_array_klass_klass_instance;
 ciObjArrayKlassKlass*  ciEnv::_obj_array_klass_klass_instance;
 
-ciInstanceKlass* ciEnv::_ArrayStoreException;
-ciInstanceKlass* ciEnv::_Class;
-ciInstanceKlass* ciEnv::_ClassCastException;
-ciInstanceKlass* ciEnv::_Object;
-ciInstanceKlass* ciEnv::_Throwable;
-ciInstanceKlass* ciEnv::_Thread;
-ciInstanceKlass* ciEnv::_OutOfMemoryError;
-ciInstanceKlass* ciEnv::_String;
+#define WK_KLASS_DEFN(name, ignore_s, ignore_o) ciInstanceKlass* ciEnv::_##name = NULL;
+WK_KLASSES_DO(WK_KLASS_DEFN)
+#undef WK_KLASS_DEFN
 
 ciSymbol*        ciEnv::_unloaded_cisymbol = NULL;
 ciInstanceKlass* ciEnv::_unloaded_ciinstance_klass = NULL;
@@ -110,6 +105,8 @@
   _ArrayIndexOutOfBoundsException_instance = NULL;
   _ArrayStoreException_instance = NULL;
   _ClassCastException_instance = NULL;
+  _the_null_string = NULL;
+  _the_min_jint_string = NULL;
 }
 
 ciEnv::ciEnv(Arena* arena) {
@@ -163,6 +160,8 @@
   _ArrayIndexOutOfBoundsException_instance = NULL;
   _ArrayStoreException_instance = NULL;
   _ClassCastException_instance = NULL;
+  _the_null_string = NULL;
+  _the_min_jint_string = NULL;
 }
 
 ciEnv::~ciEnv() {
@@ -179,7 +178,7 @@
   _jvmti_can_hotswap_or_post_breakpoint = JvmtiExport::can_hotswap_or_post_breakpoint();
   _jvmti_can_examine_or_deopt_anywhere  = JvmtiExport::can_examine_or_deopt_anywhere();
   _jvmti_can_access_local_variables     = JvmtiExport::can_access_local_variables();
-  _jvmti_can_post_exceptions            = JvmtiExport::can_post_exceptions();
+  _jvmti_can_post_on_exceptions         = JvmtiExport::can_post_on_exceptions();
 }
 
 // ------------------------------------------------------------------
@@ -248,6 +247,22 @@
   return _ClassCastException_instance;
 }
 
+ciInstance* ciEnv::the_null_string() {
+  if (_the_null_string == NULL) {
+    VM_ENTRY_MARK;
+    _the_null_string = get_object(Universe::the_null_string())->as_instance();
+  }
+  return _the_null_string;
+}
+
+ciInstance* ciEnv::the_min_jint_string() {
+  if (_the_min_jint_string == NULL) {
+    VM_ENTRY_MARK;
+    _the_min_jint_string = get_object(Universe::the_min_jint_string())->as_instance();
+  }
+  return _the_min_jint_string;
+}
+
 // ------------------------------------------------------------------
 // ciEnv::get_method_from_handle
 ciMethod* ciEnv::get_method_from_handle(jobject method) {
@@ -419,12 +434,11 @@
 // ciEnv::get_klass_by_index_impl
 //
 // Implementation of get_klass_by_index.
-ciKlass* ciEnv::get_klass_by_index_impl(ciInstanceKlass* accessor,
+ciKlass* ciEnv::get_klass_by_index_impl(constantPoolHandle cpool,
                                         int index,
-                                        bool& is_accessible) {
-  assert(accessor->get_instanceKlass()->is_linked(), "must be linked before accessing constant pool");
+                                        bool& is_accessible,
+                                        ciInstanceKlass* accessor) {
   EXCEPTION_CONTEXT;
-  constantPoolHandle cpool(THREAD, accessor->get_instanceKlass()->constants());
   KlassHandle klass (THREAD, constantPoolOopDesc::klass_at_if_loaded(cpool, index));
   symbolHandle klass_name;
   if (klass.is_null()) {
@@ -486,22 +500,21 @@
 // ciEnv::get_klass_by_index
 //
 // Get a klass from the constant pool.
-ciKlass* ciEnv::get_klass_by_index(ciInstanceKlass* accessor,
+ciKlass* ciEnv::get_klass_by_index(constantPoolHandle cpool,
                                    int index,
-                                   bool& is_accessible) {
-  GUARDED_VM_ENTRY(return get_klass_by_index_impl(accessor, index, is_accessible);)
+                                   bool& is_accessible,
+                                   ciInstanceKlass* accessor) {
+  GUARDED_VM_ENTRY(return get_klass_by_index_impl(cpool, index, is_accessible, accessor);)
 }
 
 // ------------------------------------------------------------------
 // ciEnv::get_constant_by_index_impl
 //
 // Implementation of get_constant_by_index().
-ciConstant ciEnv::get_constant_by_index_impl(ciInstanceKlass* accessor,
-                                             int index) {
+ciConstant ciEnv::get_constant_by_index_impl(constantPoolHandle cpool,
+                                             int index,
+                                             ciInstanceKlass* accessor) {
   EXCEPTION_CONTEXT;
-  instanceKlass* ik_accessor = accessor->get_instanceKlass();
-  assert(ik_accessor->is_linked(), "must be linked before accessing constant pool");
-  constantPoolOop cpool = ik_accessor->constants();
   constantTag tag = cpool->tag_at(index);
   if (tag.is_int()) {
     return ciConstant(T_INT, (jint)cpool->int_at(index));
@@ -529,7 +542,7 @@
   } else if (tag.is_klass() || tag.is_unresolved_klass()) {
     // 4881222: allow ldc to take a class type
     bool ignore;
-    ciKlass* klass = get_klass_by_index_impl(accessor, index, ignore);
+    ciKlass* klass = get_klass_by_index_impl(cpool, index, ignore, accessor);
     if (HAS_PENDING_EXCEPTION) {
       CLEAR_PENDING_EXCEPTION;
       record_out_of_memory_failure();
@@ -538,6 +551,11 @@
     assert (klass->is_instance_klass() || klass->is_array_klass(),
             "must be an instance or array klass ");
     return ciConstant(T_OBJECT, klass);
+  } else if (tag.is_object()) {
+    oop obj = cpool->object_at(index);
+    assert(obj->is_instance(), "must be an instance");
+    ciObject* ciobj = get_object(obj);
+    return ciConstant(T_OBJECT, ciobj);
   } else {
     ShouldNotReachHere();
     return ciConstant();
@@ -574,9 +592,10 @@
 // Pull a constant out of the constant pool.  How appropriate.
 //
 // Implementation note: this query is currently in no way cached.
-ciConstant ciEnv::get_constant_by_index(ciInstanceKlass* accessor,
-                                        int index) {
-  GUARDED_VM_ENTRY(return get_constant_by_index_impl(accessor, index); )
+ciConstant ciEnv::get_constant_by_index(constantPoolHandle cpool,
+                                        int index,
+                                        ciInstanceKlass* accessor) {
+  GUARDED_VM_ENTRY(return get_constant_by_index_impl(cpool, index, accessor);)
 }
 
 // ------------------------------------------------------------------
@@ -586,7 +605,7 @@
 //
 // Implementation note: this query is currently in no way cached.
 bool ciEnv::is_unresolved_string(ciInstanceKlass* accessor,
-                                        int index) const {
+                                 int index) const {
   GUARDED_VM_ENTRY(return is_unresolved_string_impl(accessor->get_instanceKlass(), index); )
 }
 
@@ -597,7 +616,7 @@
 //
 // Implementation note: this query is currently in no way cached.
 bool ciEnv::is_unresolved_klass(ciInstanceKlass* accessor,
-                                        int index) const {
+                                int index) const {
   GUARDED_VM_ENTRY(return is_unresolved_klass_impl(accessor->get_instanceKlass(), index); )
 }
 
@@ -678,22 +697,17 @@
 
 // ------------------------------------------------------------------
 // ciEnv::get_method_by_index_impl
-ciMethod* ciEnv::get_method_by_index_impl(ciInstanceKlass* accessor,
-                                     int index, Bytecodes::Code bc) {
-  // Get the method's declared holder.
-
-  assert(accessor->get_instanceKlass()->is_linked(), "must be linked before accessing constant pool");
-  constantPoolHandle cpool = accessor->get_instanceKlass()->constants();
+ciMethod* ciEnv::get_method_by_index_impl(constantPoolHandle cpool,
+                                          int index, Bytecodes::Code bc,
+                                          ciInstanceKlass* accessor) {
   int holder_index = cpool->klass_ref_index_at(index);
   bool holder_is_accessible;
-  ciKlass* holder = get_klass_by_index_impl(accessor, holder_index, holder_is_accessible);
+  ciKlass* holder = get_klass_by_index_impl(cpool, holder_index, holder_is_accessible, accessor);
   ciInstanceKlass* declared_holder = get_instance_klass_for_declared_method_holder(holder);
 
   // Get the method's name and signature.
-  int nt_index = cpool->name_and_type_ref_index_at(index);
-  int sig_index = cpool->signature_ref_index_at(nt_index);
   symbolOop name_sym = cpool->name_ref_at(index);
-  symbolOop sig_sym = cpool->symbol_at(sig_index);
+  symbolOop sig_sym  = cpool->signature_ref_at(index);
 
   if (holder_is_accessible) { // Our declared holder is loaded.
     instanceKlass* lookup = declared_holder->get_instanceKlass();
@@ -715,6 +729,33 @@
 
 
 // ------------------------------------------------------------------
+// ciEnv::get_fake_invokedynamic_method_impl
+ciMethod* ciEnv::get_fake_invokedynamic_method_impl(constantPoolHandle cpool,
+                                                    int index, Bytecodes::Code bc) {
+  assert(bc == Bytecodes::_invokedynamic, "must be invokedynamic");
+
+  // Get the CallSite from the constant pool cache.
+  ConstantPoolCacheEntry* cpc_entry = cpool->cache()->secondary_entry_at(index);
+  assert(cpc_entry != NULL && cpc_entry->is_secondary_entry(), "sanity");
+  Handle call_site = cpc_entry->f1();
+
+  // Call site might not be linked yet.
+  if (call_site.is_null()) {
+    ciInstanceKlass* mh_klass = get_object(SystemDictionary::MethodHandle_klass())->as_instance_klass();
+    ciSymbol*       sig_sym   = get_object(cpool->signature_ref_at(index))->as_symbol();
+    return get_unloaded_method(mh_klass, ciSymbol::invoke_name(), sig_sym);
+  }
+
+  // Get the methodOop from the CallSite.
+  methodOop method_oop = (methodOop) java_dyn_CallSite::vmmethod(call_site());
+  assert(method_oop != NULL, "sanity");
+  assert(method_oop->is_method_handle_invoke(), "consistent");
+
+  return get_object(method_oop)->as_method();
+}
+
+
+// ------------------------------------------------------------------
 // ciEnv::get_instance_klass_for_declared_method_holder
 ciInstanceKlass* ciEnv::get_instance_klass_for_declared_method_holder(ciKlass* method_holder) {
   // For the case of <array>.clone(), the method holder can be a ciArrayKlass
@@ -736,15 +777,19 @@
 }
 
 
-
-
 // ------------------------------------------------------------------
 // ciEnv::get_method_by_index
-ciMethod* ciEnv::get_method_by_index(ciInstanceKlass* accessor,
-                                     int index, Bytecodes::Code bc) {
-  GUARDED_VM_ENTRY(return get_method_by_index_impl(accessor, index, bc);)
+ciMethod* ciEnv::get_method_by_index(constantPoolHandle cpool,
+                                     int index, Bytecodes::Code bc,
+                                     ciInstanceKlass* accessor) {
+  if (bc == Bytecodes::_invokedynamic) {
+    GUARDED_VM_ENTRY(return get_fake_invokedynamic_method_impl(cpool, index, bc);)
+  } else {
+    GUARDED_VM_ENTRY(return get_method_by_index_impl(cpool, index, bc, accessor);)
+  }
 }
 
+
 // ------------------------------------------------------------------
 // ciEnv::name_buffer
 char *ciEnv::name_buffer(int req_len) {
@@ -846,8 +891,8 @@
            JvmtiExport::can_examine_or_deopt_anywhere()) ||
           (!jvmti_can_access_local_variables() &&
            JvmtiExport::can_access_local_variables()) ||
-          (!jvmti_can_post_exceptions() &&
-           JvmtiExport::can_post_exceptions()) )) {
+          (!jvmti_can_post_on_exceptions() &&
+           JvmtiExport::can_post_on_exceptions()) )) {
       record_failure("Jvmti state change invalidated dependencies");
     }
 
@@ -917,18 +962,10 @@
     if (nm == NULL) {
       // The CodeCache is full.  Print out warning and disable compilation.
       record_failure("code cache is full");
-      UseInterpreter = true;
-      if (UseCompiler || AlwaysCompileLoopMethods ) {
-#ifndef PRODUCT
-        warning("CodeCache is full. Compiler has been disabled");
-        if (CompileTheWorld || ExitOnFullCodeCache) {
-          before_exit(JavaThread::current());
-          exit_globals(); // will delete tty
-          vm_direct_exit(CompileTheWorld ? 0 : 1);
-        }
-#endif
-        UseCompiler               = false;
-        AlwaysCompileLoopMethods  = false;
+      {
+        MutexUnlocker ml(Compile_lock);
+        MutexUnlocker locker(MethodCompileQueue_lock);
+        CompileBroker::handle_full_code_cache();
       }
     } else {
       NOT_PRODUCT(nm->set_has_debug_info(has_debug_info); )
--- a/src/share/vm/ci/ciEnv.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/ci/ciEnv.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -57,7 +57,7 @@
   bool  _jvmti_can_hotswap_or_post_breakpoint;
   bool  _jvmti_can_examine_or_deopt_anywhere;
   bool  _jvmti_can_access_local_variables;
-  bool  _jvmti_can_post_exceptions;
+  bool  _jvmti_can_post_on_exceptions;
 
   // Cache DTrace flags
   bool  _dtrace_extended_probes;
@@ -74,14 +74,9 @@
   static ciTypeArrayKlassKlass* _type_array_klass_klass_instance;
   static ciObjArrayKlassKlass*  _obj_array_klass_klass_instance;
 
-  static ciInstanceKlass* _ArrayStoreException;
-  static ciInstanceKlass* _Class;
-  static ciInstanceKlass* _ClassCastException;
-  static ciInstanceKlass* _Object;
-  static ciInstanceKlass* _Throwable;
-  static ciInstanceKlass* _Thread;
-  static ciInstanceKlass* _OutOfMemoryError;
-  static ciInstanceKlass* _String;
+#define WK_KLASS_DECL(name, ignore_s, ignore_o) static ciInstanceKlass* _##name;
+  WK_KLASSES_DO(WK_KLASS_DECL)
+#undef WK_KLASS_DECL
 
   static ciSymbol*        _unloaded_cisymbol;
   static ciInstanceKlass* _unloaded_ciinstance_klass;
@@ -97,6 +92,9 @@
   ciInstance* _ArrayStoreException_instance;
   ciInstance* _ClassCastException_instance;
 
+  ciInstance* _the_null_string;      // The Java string "null"
+  ciInstance* _the_min_jint_string; // The Java string "-2147483648"
+
   // Look up a klass by name from a particular class loader (the accessor's).
   // If require_local, result must be defined in that class loader, or NULL.
   // If !require_local, a result from remote class loader may be reported,
@@ -114,37 +112,45 @@
                              bool require_local);
 
   // Constant pool access.
-  ciKlass*   get_klass_by_index(ciInstanceKlass* loading_klass,
+  ciKlass*   get_klass_by_index(constantPoolHandle cpool,
                                 int klass_index,
-                                bool& is_accessible);
-  ciConstant get_constant_by_index(ciInstanceKlass* loading_klass,
-                                   int constant_index);
+                                bool& is_accessible,
+                                ciInstanceKlass* loading_klass);
+  ciConstant get_constant_by_index(constantPoolHandle cpool,
+                                   int constant_index,
+                                   ciInstanceKlass* accessor);
   bool       is_unresolved_string(ciInstanceKlass* loading_klass,
                                    int constant_index) const;
   bool       is_unresolved_klass(ciInstanceKlass* loading_klass,
                                    int constant_index) const;
   ciField*   get_field_by_index(ciInstanceKlass* loading_klass,
                                 int field_index);
-  ciMethod*  get_method_by_index(ciInstanceKlass* loading_klass,
-                                 int method_index, Bytecodes::Code bc);
+  ciMethod*  get_method_by_index(constantPoolHandle cpool,
+                                 int method_index, Bytecodes::Code bc,
+                                 ciInstanceKlass* loading_klass);
 
   // Implementation methods for loading and constant pool access.
   ciKlass* get_klass_by_name_impl(ciKlass* accessing_klass,
                                   ciSymbol* klass_name,
                                   bool require_local);
-  ciKlass*   get_klass_by_index_impl(ciInstanceKlass* loading_klass,
+  ciKlass*   get_klass_by_index_impl(constantPoolHandle cpool,
                                      int klass_index,
-                                     bool& is_accessible);
-  ciConstant get_constant_by_index_impl(ciInstanceKlass* loading_klass,
-                                        int constant_index);
+                                     bool& is_accessible,
+                                     ciInstanceKlass* loading_klass);
+  ciConstant get_constant_by_index_impl(constantPoolHandle cpool,
+                                        int constant_index,
+                                        ciInstanceKlass* loading_klass);
   bool       is_unresolved_string_impl (instanceKlass* loading_klass,
                                         int constant_index) const;
   bool       is_unresolved_klass_impl (instanceKlass* loading_klass,
                                         int constant_index) const;
   ciField*   get_field_by_index_impl(ciInstanceKlass* loading_klass,
                                      int field_index);
-  ciMethod*  get_method_by_index_impl(ciInstanceKlass* loading_klass,
-                                      int method_index, Bytecodes::Code bc);
+  ciMethod*  get_method_by_index_impl(constantPoolHandle cpool,
+                                      int method_index, Bytecodes::Code bc,
+                                      ciInstanceKlass* loading_klass);
+  ciMethod*  get_fake_invokedynamic_method_impl(constantPoolHandle cpool,
+                                                int index, Bytecodes::Code bc);
 
   // Helper methods
   bool       check_klass_accessibility(ciKlass* accessing_klass,
@@ -253,7 +259,7 @@
   bool  jvmti_can_hotswap_or_post_breakpoint() const { return _jvmti_can_hotswap_or_post_breakpoint; }
   bool  jvmti_can_examine_or_deopt_anywhere()  const { return _jvmti_can_examine_or_deopt_anywhere; }
   bool  jvmti_can_access_local_variables()     const { return _jvmti_can_access_local_variables; }
-  bool  jvmti_can_post_exceptions()            const { return _jvmti_can_post_exceptions; }
+  bool  jvmti_can_post_on_exceptions()         const { return _jvmti_can_post_on_exceptions; }
 
   // Cache DTrace flags
   void  cache_dtrace_flags();
@@ -286,30 +292,13 @@
 
 
   // Access to certain well known ciObjects.
-  ciInstanceKlass* ArrayStoreException_klass() {
-    return _ArrayStoreException;
-  }
-  ciInstanceKlass* Class_klass() {
-    return _Class;
-  }
-  ciInstanceKlass* ClassCastException_klass() {
-    return _ClassCastException;
-  }
-  ciInstanceKlass* Object_klass() {
-    return _Object;
+#define WK_KLASS_FUNC(name, ignore_s, ignore_o) \
+  ciInstanceKlass* name() { \
+    return _##name; \
   }
-  ciInstanceKlass* Throwable_klass() {
-    return _Throwable;
-  }
-  ciInstanceKlass* Thread_klass() {
-    return _Thread;
-  }
-  ciInstanceKlass* OutOfMemoryError_klass() {
-    return _OutOfMemoryError;
-  }
-  ciInstanceKlass* String_klass() {
-    return _String;
-  }
+  WK_KLASSES_DO(WK_KLASS_FUNC)
+#undef WK_KLASS_FUNC
+
   ciInstance* NullPointerException_instance() {
     assert(_NullPointerException_instance != NULL, "initialization problem");
     return _NullPointerException_instance;
@@ -324,6 +313,9 @@
   ciInstance* ArrayStoreException_instance();
   ciInstance* ClassCastException_instance();
 
+  ciInstance* the_null_string();
+  ciInstance* the_min_jint_string();
+
   static ciSymbol* unloaded_cisymbol() {
     return _unloaded_cisymbol;
   }
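For one well-known-klass entry such as Object_klass, the two macros above expand roughly as follows (a sketch; the actual entry list is supplied by WK_KLASSES_DO, which lives in systemDictionary.hpp):

// From WK_KLASS_DECL(Object_klass, ...), in the private section:
static ciInstanceKlass* _Object_klass;

// From WK_KLASS_FUNC(Object_klass, ...), in the public section:
ciInstanceKlass* Object_klass() {
  return _Object_klass;
}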
--- a/src/share/vm/ci/ciExceptionHandler.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/ci/ciExceptionHandler.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2003 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,12 +34,16 @@
 //
 // Get the exception klass that this handler catches.
 ciInstanceKlass* ciExceptionHandler::catch_klass() {
+  VM_ENTRY_MARK;
   assert(!is_catch_all(), "bad index");
   if (_catch_klass == NULL) {
     bool will_link;
-    ciKlass* k = CURRENT_ENV->get_klass_by_index(_loading_klass,
+    assert(_loading_klass->get_instanceKlass()->is_linked(), "must be linked before accessing constant pool");
+    constantPoolHandle cpool(_loading_klass->get_instanceKlass()->constants());
+    ciKlass* k = CURRENT_ENV->get_klass_by_index(cpool,
                                                  _catch_klass_index,
-                                                 will_link);
+                                                 will_link,
+                                                 _loading_klass);
     if (!will_link && k->is_loaded()) {
       GUARDED_VM_ENTRY(
         k = CURRENT_ENV->get_unloaded_klass(_loading_klass, k->name());
--- a/src/share/vm/ci/ciField.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/ci/ciField.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -86,7 +86,7 @@
     bool ignore;
     // This is not really a class reference; the index always refers to the
     // field's type signature, as a symbol.  Linkage checks do not apply.
-    _type = ciEnv::current(thread)->get_klass_by_index(klass, sig_index, ignore);
+    _type = ciEnv::current(thread)->get_klass_by_index(cpool, sig_index, ignore, klass);
   } else {
     _type = ciType::make(field_type);
   }
@@ -100,9 +100,9 @@
   int holder_index = cpool->klass_ref_index_at(index);
   bool holder_is_accessible;
   ciInstanceKlass* declared_holder =
-    ciEnv::current(thread)->get_klass_by_index(klass, holder_index,
-                                               holder_is_accessible)
-      ->as_instance_klass();
+    ciEnv::current(thread)->get_klass_by_index(cpool, holder_index,
+                                               holder_is_accessible,
+                                               klass)->as_instance_klass();
 
   // The declared holder of this field may not have been loaded.
   // Bail out with partial field information.
@@ -161,6 +161,18 @@
          "bootstrap classes must not create & cache unshared fields");
 }
 
+static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
+  if (holder == NULL)
+    return false;
+  if (holder->name() == ciSymbol::java_lang_System())
+    // Never trust strangely unstable finals:  System.out, etc.
+    return false;
+  // Even if general trusting is disabled, trust system-built closures in these packages.
+  if (holder->is_in_package("java/dyn") || holder->is_in_package("sun/dyn"))
+    return true;
+  return TrustFinalNonStaticFields;
+}
+
 void ciField::initialize_from(fieldDescriptor* fd) {
   // Get the flags, offset, and canonical holder of the field.
   _flags = ciFlags(fd->access_flags());
@@ -168,8 +180,18 @@
   _holder = CURRENT_ENV->get_object(fd->field_holder())->as_instance_klass();
 
   // Check to see if the field is constant.
-  if (_holder->is_initialized() &&
-      this->is_final() && this->is_static()) {
+  if (_holder->is_initialized() && this->is_final()) {
+    if (!this->is_static()) {
+      // A field can be constant if it's a final static field or if it's
+      // a final non-static field of a trusted class ({java,sun}.dyn).
+      if (trust_final_non_static_fields(_holder)) {
+        _is_constant = true;
+        return;
+      }
+      _is_constant = false;
+      return;
+    }
+
     // This field just may be constant.  The only cases where it will
     // not be constant are:
     //
@@ -182,8 +204,8 @@
     //    java.lang.System.out, and java.lang.System.err.
 
     klassOop k = _holder->get_klassOop();
-    assert( SystemDictionary::system_klass() != NULL, "Check once per vm");
-    if( k == SystemDictionary::system_klass() ) {
+    assert( SystemDictionary::System_klass() != NULL, "Check once per vm");
+    if( k == SystemDictionary::System_klass() ) {
       // Check offsets for case 2: System.in, System.out, or System.err
       if( _offset == java_lang_System::in_offset_in_bytes()  ||
           _offset == java_lang_System::out_offset_in_bytes() ||
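The trust_final_non_static_fields() predicate added above reduces to a three-step decision. A standalone sketch with a plain string standing in for the holder's ciSymbol name (the flag name mirrors the -XX option; everything else is illustrative):

#include <string>

bool TrustFinalNonStaticFields = false;  // stands in for the VM flag

bool trust_final_non_static(const std::string& holder_name) {
  // Never trust strangely unstable finals: System.in/out/err are reassigned.
  if (holder_name == "java/lang/System")
    return false;
  // Always trust system-built closures in the method-handle packages.
  if (holder_name.rfind("java/dyn/", 0) == 0 ||
      holder_name.rfind("sun/dyn/", 0) == 0)
    return true;
  // Otherwise defer to the global flag.
  return TrustFinalNonStaticFields;
}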
--- a/src/share/vm/ci/ciField.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/ci/ciField.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -138,10 +138,18 @@
 
   // Get the constant value of this field.
   ciConstant constant_value() {
-    assert(is_constant(), "illegal call to constant_value()");
+    assert(is_static() && is_constant(), "illegal call to constant_value()");
     return _constant_value;
   }
 
+  // Get the constant value of a non-static final field in the given
+  // object.
+  ciConstant constant_value_of(ciObject* object) {
+    assert(!is_static() && is_constant(), "only if field is non-static constant");
+    assert(object->is_instance(), "must be instance");
+    return object->as_instance()->field_value(this);
+  }
+
   // Check for link time errors.  Accessing a field from a
   // certain class via a certain bytecode may or may not be legal.
   // This call checks to see if an exception may be raised by
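Hypothetical call sites contrasting the two accessors declared above (a sketch; "field" and "receiver" are invented names). A static constant's value is cached in the ciField itself, while a non-static final must be read out of a concrete receiver instance:

// Static final: no receiver needed, the value comes with the field.
//   if (field->is_static() && field->is_constant())
//     ciConstant c = field->constant_value();
//
// Non-static final on a trusted holder: the value lives in an instance.
//   if (!field->is_static() && field->is_constant())
//     ciConstant c = field->constant_value_of(receiver);  // receiver : ciObject*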
--- a/src/share/vm/ci/ciInstance.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/ci/ciInstance.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -36,7 +36,7 @@
   VM_ENTRY_MARK;
   oop m = get_oop();
   // Return NULL if it is not java.lang.Class.
-  if (m == NULL || m->klass() != SystemDictionary::class_klass()) {
+  if (m == NULL || m->klass() != SystemDictionary::Class_klass()) {
     return NULL;
   }
   // Return either a primitive type or a klass.
--- a/src/share/vm/ci/ciInstanceKlass.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/ci/ciInstanceKlass.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -75,7 +75,7 @@
   _java_mirror = NULL;
 
   if (is_shared()) {
-    if (h_k() != SystemDictionary::object_klass()) {
+    if (h_k() != SystemDictionary::Object_klass()) {
       super();
     }
     java_mirror();
@@ -232,8 +232,48 @@
 // ------------------------------------------------------------------
 // ciInstanceKlass::uses_default_loader
 bool ciInstanceKlass::uses_default_loader() {
-  VM_ENTRY_MARK;
-  return loader() == NULL;
+  // Note:  We do not need to resolve the handle or enter the VM
+  // in order to test null-ness.
+  return _loader == NULL;
+}
+
+// ------------------------------------------------------------------
+// ciInstanceKlass::is_in_package
+//
+// Is this klass in the given package?
+bool ciInstanceKlass::is_in_package(const char* packagename, int len) {
+  // To avoid class loader mischief, this test always rejects application classes.
+  if (!uses_default_loader())
+    return false;
+  GUARDED_VM_ENTRY(
+    return is_in_package_impl(packagename, len);
+  )
+}
+
+bool ciInstanceKlass::is_in_package_impl(const char* packagename, int len) {
+  ASSERT_IN_VM;
+
+  // If packagename contains a trailing '/', exclude it from the
+  // prefix test, since we test for that character explicitly below.
+  if (packagename[len - 1] == '/')
+    len--;
+
+  if (!name()->starts_with(packagename, len))
+    return false;
+
+  // Reject if nothing follows the prefix, i.e. the class name is exactly "java/lang".
+  if ((len + 1) > name()->utf8_length())
+    return false;
+
+  // Test for trailing '/'
+  if ((char) name()->byte_at(len) != '/')
+    return false;
+
+  // Make sure it's not actually in a subpackage:
+  if (name()->index_of_at(len+1, "/", 1) >= 0)
+    return false;
+
+  return true;
 }
 
 // ------------------------------------------------------------------
@@ -341,6 +381,20 @@
 }
 
 // ------------------------------------------------------------------
+// ciInstanceKlass::get_field_by_name
+ciField* ciInstanceKlass::get_field_by_name(ciSymbol* name, ciSymbol* signature, bool is_static) {
+  VM_ENTRY_MARK;
+  instanceKlass* k = get_instanceKlass();
+  fieldDescriptor fd;
+  klassOop def = k->find_field(name->get_symbolOop(), signature->get_symbolOop(), is_static, &fd);
+  if (def == NULL) {
+    return NULL;
+  }
+  ciField* field = new (CURRENT_THREAD_ENV->arena()) ciField(&fd);
+  return field;
+}
+
+// ------------------------------------------------------------------
 // ciInstanceKlass::non_static_fields.
 
 class NonStaticFieldFiller: public FieldClosure {
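A worked example of the package test above, applied to the class name "java/lang/String"; each outcome follows from the four checks in is_in_package_impl (and any class from a non-boot loader is rejected up front):

// is_in_package("java/lang")  -> true   prefix matches, next byte is '/',
//                                       and "String" contains no further '/'
// is_in_package("java/lang/") -> true   the trailing '/' is stripped first
// is_in_package("java")       -> false  "lang/String" still contains a '/',
//                                       i.e. the class sits in a subpackage
// is_in_package("java/l")     -> false  the byte after the prefix is 'a', not '/'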
--- a/src/share/vm/ci/ciInstanceKlass.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/ci/ciInstanceKlass.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -29,10 +29,11 @@
 // be loaded.
 class ciInstanceKlass : public ciKlass {
   CI_PACKAGE_ACCESS
+  friend class ciBytecodeStream;
   friend class ciEnv;
+  friend class ciExceptionHandler;
   friend class ciMethod;
   friend class ciField;
-  friend class ciBytecodeStream;
 
 private:
   jobject                _loader;
@@ -78,6 +79,8 @@
 
   const char* type_string() { return "ciInstanceKlass"; }
 
+  bool is_in_package_impl(const char* packagename, int len);
+
   void print_impl(outputStream* st);
 
   ciConstantPoolCache* field_cache();
@@ -148,6 +151,7 @@
 
   ciInstanceKlass* get_canonical_holder(int offset);
   ciField* get_field_by_offset(int field_offset, bool is_static);
+  ciField* get_field_by_name(ciSymbol* name, ciSymbol* signature, bool is_static);
 
   GrowableArray<ciField*>* non_static_fields();
 
@@ -195,6 +199,12 @@
 
   bool is_java_lang_Object();
 
+  // Is this klass in the given package?
+  bool is_in_package(const char* packagename) {
+    return is_in_package(packagename, (int) strlen(packagename));
+  }
+  bool is_in_package(const char* packagename, int len);
+
   // What kind of ciObject is this?
   bool is_instance_klass() { return true; }
   bool is_java_klass()     { return true; }
--- a/src/share/vm/ci/ciKlass.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/ci/ciKlass.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciKlass.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/ci/ciKlass.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -69,7 +69,7 @@
   ciKlass(KlassHandle k_h);
 
   // What is the name of this klass?
-  ciSymbol* name() { return _name; }
+  ciSymbol* name() const { return _name; }
 
   // What is its layout helper value?
   jint layout_helper() { return _layout_helper; }
--- a/src/share/vm/ci/ciMethod.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/ci/ciMethod.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -436,15 +436,21 @@
           // we will set result._method also.
         }
         // Determine call site's morphism.
-        // The call site count could be == (receivers_count_total + 1)
-        // not only in the case of a polymorphic call but also in the case
-        // when a method data snapshot is taken after the site count was updated
-        // but before receivers counters were updated.
-        if (morphism == result._limit) {
-           // There were no array klasses and morphism <= MorphismLimit.
-           if (morphism <  ciCallProfile::MorphismLimit ||
-               morphism == ciCallProfile::MorphismLimit &&
-               (receivers_count_total+1) >= count) {
+        // The call site count is 0 with known morphism (only 1 or 2 receivers)
+        // or < 0 in the case of a type check failure for checkcast, aastore, instanceof.
+        // The call site count is > 0 in the case of a polymorphic virtual call.
+        if (morphism > 0 && morphism == result._limit) {
+           // At this point morphism <= MorphismLimit.
+           if ((morphism <  ciCallProfile::MorphismLimit) ||
+               (morphism == ciCallProfile::MorphismLimit && count == 0)) {
+#ifdef ASSERT
+             if (count > 0) {
+               this->print_short_name(tty);
+               tty->print_cr(" @ bci:%d", bci);
+               this->print_codes();
+               assert(false, "this call site should not be polymorphic");
+             }
+#endif
              result._morphism = morphism;
            }
         }
@@ -452,10 +458,8 @@
         // zero or less, presume that this is a typecheck profile and
         // do nothing.  Otherwise, increase count to be the sum of all
         // receiver's counts.
-        if (count > 0) {
-          if (count < receivers_count_total) {
-            count = receivers_count_total;
-          }
+        if (count >= 0) {
+          count += receivers_count_total;
         }
       }
       result._count = count;
@@ -687,7 +691,7 @@
 // ------------------------------------------------------------------
 // invokedynamic support
 //
-bool ciMethod::is_method_handle_invoke() {
+bool ciMethod::is_method_handle_invoke() const {
   check_is_loaded();
   bool flag = ((flags().as_int() & JVM_MH_INVOKE_BITS) == JVM_MH_INVOKE_BITS);
 #ifdef ASSERT
@@ -700,6 +704,12 @@
   return flag;
 }
 
+bool ciMethod::is_method_handle_adapter() const {
+  check_is_loaded();
+  VM_ENTRY_MARK;
+  return get_methodOop()->is_method_handle_adapter();
+}
+
 ciInstance* ciMethod::method_handle_type() {
   check_is_loaded();
   VM_ENTRY_MARK;
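A worked example of the revised profile accounting above, with invented numbers: a virtual call site has two receiver-type rows, A = 60 and B = 40, and a raw site count of 0.

// receivers_count_total = 60 + 40 = 100, morphism = 2
// raw count == 0  -> accepted as a known bimorphic site; the ASSERT that
//                    flags count > 0 does not fire
// reported count  = 0 + 100 = 100  // count += receivers_count_total (count >= 0)
// raw count <  0  -> a typecheck (checkcast/aastore/instanceof) profile;
//                    the count is left untouched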
--- a/src/share/vm/ci/ciMethod.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/ci/ciMethod.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,6 +38,8 @@
   CI_PACKAGE_ACCESS
   friend class ciEnv;
   friend class ciExceptionHandlerStream;
+  friend class ciBytecodeStream;
+  friend class ciMethodHandle;
 
  private:
   // General method information.
@@ -213,7 +215,10 @@
   bool check_call(int refinfo_index, bool is_static) const;
   void build_method_data();  // make sure it exists in the VM also
   int scale_count(int count, float prof_factor = 1.);  // make MDO count commensurate with IIC
-  bool is_method_handle_invoke();
+
+  // JSR 292 support
+  bool is_method_handle_invoke()  const;
+  bool is_method_handle_adapter() const;
   ciInstance* method_handle_type();
 
   // What kind of ciObject is this?
@@ -251,4 +256,10 @@
   // Print the name of this method in various incarnations.
   void print_name(outputStream* st = tty);
   void print_short_name(outputStream* st = tty);
+
+  methodOop get_method_handle_target() {
+    klassOop receiver_limit_oop = NULL;
+    int flags = 0;
+    return MethodHandles::decode_method(get_oop(), receiver_limit_oop, flags);
+  }
 };
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/ci/ciMethodHandle.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_ciMethodHandle.cpp.incl"
+
+// ciMethodHandle
+
+// ------------------------------------------------------------------
+// ciMethodHandle::get_adapter
+//
+// Return an adapter for this MethodHandle.
+ciMethod* ciMethodHandle::get_adapter(bool is_invokedynamic) const {
+  VM_ENTRY_MARK;
+
+  Handle h(get_oop());
+  methodHandle callee(_callee->get_methodOop());
+  MethodHandleCompiler mhc(h, callee, is_invokedynamic, THREAD);
+  methodHandle m = mhc.compile(CHECK_NULL);
+  return CURRENT_ENV->get_object(m())->as_method();
+}
+
+
+// ------------------------------------------------------------------
+// ciMethodHandle::print_impl
+//
+// Implementation of the print method.
+void ciMethodHandle::print_impl(outputStream* st) {
+  st->print(" type=");
+  get_oop()->print();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/ci/ciMethodHandle.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// ciMethodHandle
+//
+// This class represents a java.dyn.MethodHandle object.
+class ciMethodHandle : public ciInstance {
+private:
+  ciMethod* _callee;
+
+  // Return an adapter for this MethodHandle.
+  ciMethod* get_adapter(bool is_invokedynamic) const;
+
+protected:
+  void print_impl(outputStream* st);
+
+public:
+  ciMethodHandle(instanceHandle h_i) : ciInstance(h_i), _callee(NULL) {}
+
+  // What kind of ciObject is this?
+  bool is_method_handle() const { return true; }
+
+  ciMethod* callee() const { return _callee; }
+  void  set_callee(ciMethod* m) { _callee = m; }
+
+  // Return an adapter for a MethodHandle call.
+  ciMethod* get_method_handle_adapter() const {
+    return get_adapter(false);
+  }
+
+  // Return an adapter for an invokedynamic call.
+  ciMethod* get_invokedynamic_adapter() const {
+    return get_adapter(true);
+  }
+};
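A hypothetical use of the new class (sketch only: how the compiler obtains the ciMethodHandle and the target method is elided, and the variable names are invented):

// mh     : ciMethodHandle* for the java.dyn.MethodHandle at a call site
// target : ciMethod* that the handle ultimately invokes
mh->set_callee(target);
ciMethod* adapter = mh->get_method_handle_adapter();       // plain MH invoke
// ...or, when compiling an invokedynamic instruction:
ciMethod* indy_adapter = mh->get_invokedynamic_adapter();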
--- a/src/share/vm/ci/ciObject.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/ci/ciObject.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -131,9 +131,12 @@
 
   // What kind of ciObject is this?
   virtual bool is_null_object() const       { return false; }
+  virtual bool is_call_site() const         { return false; }
+  virtual bool is_cpcache() const           { return false; }
   virtual bool is_instance()                { return false; }
   virtual bool is_method()                  { return false; }
   virtual bool is_method_data()             { return false; }
+  virtual bool is_method_handle() const     { return false; }
   virtual bool is_array()                   { return false; }
   virtual bool is_obj_array()               { return false; }
   virtual bool is_type_array()              { return false; }
@@ -185,6 +188,14 @@
     assert(is_null_object(), "bad cast");
     return (ciNullObject*)this;
   }
+  ciCallSite*              as_call_site() {
+    assert(is_call_site(), "bad cast");
+    return (ciCallSite*) this;
+  }
+  ciCPCache*               as_cpcache() {
+    assert(is_cpcache(), "bad cast");
+    return (ciCPCache*) this;
+  }
   ciInstance*              as_instance() {
     assert(is_instance(), "bad cast");
     return (ciInstance*)this;
@@ -197,6 +208,10 @@
     assert(is_method_data(), "bad cast");
     return (ciMethodData*)this;
   }
+  ciMethodHandle*          as_method_handle() {
+    assert(is_method_handle(), "bad cast");
+    return (ciMethodHandle*) this;
+  }
   ciArray*                 as_array() {
     assert(is_array(), "bad cast");
     return (ciArray*)this;
--- a/src/share/vm/ci/ciObjectFactory.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/ci/ciObjectFactory.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -144,30 +144,13 @@
   ciEnv::_obj_array_klass_klass_instance =
     get(Universe::objArrayKlassKlassObj())
       ->as_obj_array_klass_klass();
-  ciEnv::_ArrayStoreException =
-    get(SystemDictionary::ArrayStoreException_klass())
-      ->as_instance_klass();
-  ciEnv::_Class =
-    get(SystemDictionary::class_klass())
-      ->as_instance_klass();
-  ciEnv::_ClassCastException =
-    get(SystemDictionary::ClassCastException_klass())
-      ->as_instance_klass();
-  ciEnv::_Object =
-    get(SystemDictionary::object_klass())
-      ->as_instance_klass();
-  ciEnv::_Throwable =
-    get(SystemDictionary::throwable_klass())
-      ->as_instance_klass();
-  ciEnv::_Thread =
-    get(SystemDictionary::thread_klass())
-      ->as_instance_klass();
-  ciEnv::_OutOfMemoryError =
-    get(SystemDictionary::OutOfMemoryError_klass())
-      ->as_instance_klass();
-  ciEnv::_String =
-    get(SystemDictionary::string_klass())
-      ->as_instance_klass();
+
+#define WK_KLASS_DEFN(name, ignore_s, opt)                              \
+  if (SystemDictionary::name() != NULL) \
+    ciEnv::_##name = get(SystemDictionary::name())->as_instance_klass();
+
+  WK_KLASSES_DO(WK_KLASS_DEFN)
+#undef WK_KLASS_DEFN
 
   for (int len = -1; len != _ci_objects->length(); ) {
     len = _ci_objects->length();
@@ -324,13 +307,21 @@
     return new (arena()) ciMethodData(h_md);
   } else if (o->is_instance()) {
     instanceHandle h_i(THREAD, (instanceOop)o);
-    return new (arena()) ciInstance(h_i);
+    if (java_dyn_CallSite::is_instance(o))
+      return new (arena()) ciCallSite(h_i);
+    else if (java_dyn_MethodHandle::is_instance(o))
+      return new (arena()) ciMethodHandle(h_i);
+    else
+      return new (arena()) ciInstance(h_i);
   } else if (o->is_objArray()) {
     objArrayHandle h_oa(THREAD, (objArrayOop)o);
     return new (arena()) ciObjArray(h_oa);
   } else if (o->is_typeArray()) {
     typeArrayHandle h_ta(THREAD, (typeArrayOop)o);
     return new (arena()) ciTypeArray(h_ta);
+  } else if (o->is_constantPoolCache()) {
+    constantPoolCacheHandle h_cpc(THREAD, (constantPoolCacheOop) o);
+    return new (arena()) ciCPCache(h_cpc);
   }
 
   // The oop is of some type not supported by the compiler interface.
@@ -567,7 +558,7 @@
   if (key->is_perm() && _non_perm_count == 0) {
     return emptyBucket;
   } else if (key->is_instance()) {
-    if (key->klass() == SystemDictionary::class_klass()) {
+    if (key->klass() == SystemDictionary::Class_klass()) {
       // class mirror instances are always perm
       return emptyBucket;
     }
--- a/src/share/vm/ci/ciStreams.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/ci/ciStreams.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -186,8 +186,9 @@
 // If this bytecode is a new, newarray, multianewarray, instanceof,
 // or checkcast, get the referenced klass.
 ciKlass* ciBytecodeStream::get_klass(bool& will_link) {
-  return CURRENT_ENV->get_klass_by_index(_holder, get_klass_index(),
-                                         will_link);
+  VM_ENTRY_MARK;
+  constantPoolHandle cpool(_method->get_methodOop()->constants());
+  return CURRENT_ENV->get_klass_by_index(cpool, get_klass_index(), will_link, _holder);
 }
 
 // ------------------------------------------------------------------
@@ -213,7 +214,9 @@
 // If this bytecode is one of the ldc variants, get the referenced
 // constant.
 ciConstant ciBytecodeStream::get_constant() {
-  return CURRENT_ENV->get_constant_by_index(_holder, get_constant_index());
+  VM_ENTRY_MARK;
+  constantPoolHandle cpool(_method->get_methodOop()->constants());
+  return CURRENT_ENV->get_constant_by_index(cpool, get_constant_index(), _holder);
 }
 
 // ------------------------------------------------------------------
@@ -264,9 +267,11 @@
 // There is no "will_link" result passed back.  The user is responsible
 // for checking linkability when retrieving the associated field.
 ciInstanceKlass* ciBytecodeStream::get_declared_field_holder() {
+  VM_ENTRY_MARK;
+  constantPoolHandle cpool(_method->get_methodOop()->constants());
   int holder_index = get_field_holder_index();
   bool ignore;
-  return CURRENT_ENV->get_klass_by_index(_holder, holder_index, ignore)
+  return CURRENT_ENV->get_klass_by_index(cpool, holder_index, ignore, _holder)
       ->as_instance_klass();
 }
 
@@ -277,9 +282,10 @@
 // referenced by the current bytecode.  Used for generating
 // deoptimization information.
 int ciBytecodeStream::get_field_holder_index() {
-  VM_ENTRY_MARK;
-  constantPoolOop cpool = _holder->get_instanceKlass()->constants();
-  return cpool->klass_ref_index_at(get_field_index());
+  GUARDED_VM_ENTRY(
+    constantPoolOop cpool = _holder->get_instanceKlass()->constants();
+    return cpool->klass_ref_index_at(get_field_index());
+  )
 }
 
 // ------------------------------------------------------------------
@@ -321,7 +327,9 @@
 //
 // If this is a method invocation bytecode, get the invoked method.
 ciMethod* ciBytecodeStream::get_method(bool& will_link) {
-  ciMethod* m = CURRENT_ENV->get_method_by_index(_holder, get_method_index(),cur_bc());
+  VM_ENTRY_MARK;
+  constantPoolHandle cpool(_method->get_methodOop()->constants());
+  ciMethod* m = CURRENT_ENV->get_method_by_index(cpool, get_method_index(), cur_bc(), _holder);
   will_link = m->is_loaded();
   return m;
 }
@@ -338,11 +346,13 @@
 // There is no "will_link" result passed back.  The user is responsible
 // for checking linkability when retrieving the associated method.
 ciKlass* ciBytecodeStream::get_declared_method_holder() {
+  VM_ENTRY_MARK;
+  constantPoolHandle cpool(_method->get_methodOop()->constants());
   bool ignore;
-  // report as Dynamic for invokedynamic, which is syntactically classless
+  // report as InvokeDynamic for invokedynamic, which is syntactically classless
   if (cur_bc() == Bytecodes::_invokedynamic)
-    return CURRENT_ENV->get_klass_by_name(_holder, ciSymbol::java_dyn_Dynamic(), false);
-  return CURRENT_ENV->get_klass_by_index(_holder, get_method_holder_index(), ignore);
+    return CURRENT_ENV->get_klass_by_name(_holder, ciSymbol::java_dyn_InvokeDynamic(), false);
+  return CURRENT_ENV->get_klass_by_index(cpool, get_method_holder_index(), ignore, _holder);
 }
 
 // ------------------------------------------------------------------
@@ -352,8 +362,7 @@
 // referenced by the current bytecode.  Used for generating
 // deoptimization information.
 int ciBytecodeStream::get_method_holder_index() {
-  VM_ENTRY_MARK;
-  constantPoolOop cpool = _holder->get_instanceKlass()->constants();
+  constantPoolOop cpool = _method->get_methodOop()->constants();
   return cpool->klass_ref_index_at(get_method_index());
 }
 
@@ -370,3 +379,31 @@
   int name_and_type_index = cpool->name_and_type_ref_index_at(method_index);
   return cpool->signature_ref_index_at(name_and_type_index);
 }
+
+// ------------------------------------------------------------------
+// ciBytecodeStream::get_cpcache
+ciCPCache* ciBytecodeStream::get_cpcache() {
+  VM_ENTRY_MARK;
+  // Get the constant pool.
+  constantPoolOop      cpool   = _holder->get_instanceKlass()->constants();
+  constantPoolCacheOop cpcache = cpool->cache();
+
+  return CURRENT_ENV->get_object(cpcache)->as_cpcache();
+}
+
+// ------------------------------------------------------------------
+// ciBytecodeStream::get_call_site
+ciCallSite* ciBytecodeStream::get_call_site() {
+  VM_ENTRY_MARK;
+  // Get the constant pool.
+  constantPoolOop      cpool   = _holder->get_instanceKlass()->constants();
+  constantPoolCacheOop cpcache = cpool->cache();
+
+  // Get the CallSite from the constant pool cache.
+  int method_index = get_method_index();
+  ConstantPoolCacheEntry* cpcache_entry = cpcache->secondary_entry_at(method_index);
+  oop call_site_oop = cpcache_entry->f1();
+
+  // Create a CallSite object and return it.
+  return CURRENT_ENV->get_object(call_site_oop)->as_call_site();
+}
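The lookup chain behind the two new accessors above, spelled out; every step appears verbatim in the code:

// get_cpcache():   _holder -> instanceKlass -> constants() -> cache()
// get_call_site(): cache() -> secondary_entry_at(get_method_index())
//                          -> f1()   // the java.dyn.CallSite oop
//                          -> CURRENT_ENV->get_object(...)->as_call_site()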
--- a/src/share/vm/ci/ciStreams.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/ci/ciStreams.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -232,6 +232,9 @@
   int       get_method_holder_index();
   int       get_method_signature_index();
 
+  ciCPCache*  get_cpcache();
+  ciCallSite* get_call_site();
+
  private:
   void assert_index_size(int required_size) const {
 #ifdef ASSERT
--- a/src/share/vm/ci/ciSymbol.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/ci/ciSymbol.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,6 +60,22 @@
 }
 
 // ------------------------------------------------------------------
+// ciSymbol::starts_with
+//
+// Tests if the symbol starts with the given prefix.
+bool ciSymbol::starts_with(const char* prefix, int len) const {
+  GUARDED_VM_ENTRY(return get_symbolOop()->starts_with(prefix, len);)
+}
+
+// ------------------------------------------------------------------
+// ciSymbol::index_of_at
+//
+// Determines where, if anywhere, the symbol contains the given substring,
+// searching forward from index i.
+int ciSymbol::index_of_at(int i, const char* str, int len) const {
+  GUARDED_VM_ENTRY(return get_symbolOop()->index_of_at(i, str, len);)
+}
+
+// ------------------------------------------------------------------
 // ciSymbol::utf8_length
 int ciSymbol::utf8_length() {
   GUARDED_VM_ENTRY(return get_symbolOop()->utf8_length();)
--- a/src/share/vm/ci/ciSymbol.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/ci/ciSymbol.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2001 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
 // machine.
 class ciSymbol : public ciObject {
   CI_PACKAGE_ACCESS
+  // These friends all make direct use of get_symbolOop:
   friend class ciEnv;
   friend class ciInstanceKlass;
   friend class ciSignature;
@@ -38,13 +39,13 @@
   ciSymbol(symbolOop s) : ciObject(s) {}
   ciSymbol(symbolHandle s);   // for use with vmSymbolHandles
 
-  symbolOop get_symbolOop() { return (symbolOop)get_oop(); }
+  symbolOop get_symbolOop() const { return (symbolOop)get_oop(); }
 
   const char* type_string() { return "ciSymbol"; }
 
   void print_impl(outputStream* st);
 
-  int         byte_at(int i);
+  // This is public in symbolOop but private here, because the base can move:
   jbyte*      base();
 
   // Make a ciSymbol from a C string (implementation).
@@ -55,6 +56,15 @@
   const char* as_utf8();
   int         utf8_length();
 
+  // Return the i-th utf8 byte, where i < utf8_length
+  int         byte_at(int i);
+
+  // Tests if the symbol starts with the given prefix.
+  bool starts_with(const char* prefix, int len) const;
+
+  // Determines where, if anywhere, the symbol contains the given
+  // substring, searching forward from index i.
+  int index_of_at(int i, const char* str, int len) const;
+
   // What kind of ciObject is this?
   bool is_symbol() { return true; }
 
--- a/src/share/vm/ci/ciType.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/ci/ciType.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -111,7 +111,7 @@
   // short, etc.
   // Note: Bare T_ADDRESS means a raw pointer type, not a return_address.
   assert((uint)t < T_CONFLICT+1, "range check");
-  if (t == T_OBJECT)  return ciEnv::_Object;  // java/lang/Object
+  if (t == T_OBJECT)  return ciEnv::_Object_klass;  // java/lang/Object
   assert(_basic_types[t] != NULL, "domain check");
   return _basic_types[t];
 }
--- a/src/share/vm/ci/ciTypeFlow.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/ci/ciTypeFlow.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -635,8 +635,15 @@
   ciMethod* method = str->get_method(will_link);
   if (!will_link) {
     // We weren't able to find the method.
-    ciKlass* unloaded_holder = method->holder();
-    trap(str, unloaded_holder, str->get_method_holder_index());
+    if (str->cur_bc() == Bytecodes::_invokedynamic) {
+      trap(str, NULL,
+           Deoptimization::make_trap_request
+           (Deoptimization::Reason_uninitialized,
+            Deoptimization::Action_reinterpret));
+    } else {
+      ciKlass* unloaded_holder = method->holder();
+      trap(str, unloaded_holder, str->get_method_holder_index());
+    }
   } else {
     ciSignature* signature = method->signature();
     ciSignatureStream sigstr(signature);
@@ -1292,8 +1299,8 @@
   case Bytecodes::_invokeinterface: do_invoke(str, true);           break;
   case Bytecodes::_invokespecial:   do_invoke(str, true);           break;
   case Bytecodes::_invokestatic:    do_invoke(str, false);          break;
-
   case Bytecodes::_invokevirtual:   do_invoke(str, true);           break;
+  case Bytecodes::_invokedynamic:   do_invoke(str, false);          break;
 
   case Bytecodes::_istore:   store_local_int(str->get_index());     break;
   case Bytecodes::_istore_0: store_local_int(0);                    break;
--- a/src/share/vm/ci/ciUtilities.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/ci/ciUtilities.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -79,7 +79,7 @@
   THREAD);                                       \
   if (HAS_PENDING_EXCEPTION) {                   \
     if (PENDING_EXCEPTION->klass() ==            \
-        SystemDictionary::threaddeath_klass()) { \
+        SystemDictionary::ThreadDeath_klass()) { \
       /* Kill the compilation. */                \
       fatal("unhandled ci exception");           \
       return (result);                           \
--- a/src/share/vm/classfile/classFileParser.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/classfile/classFileParser.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -430,7 +430,7 @@
   case JVM_CONSTANT_UnresolvedClass :
     // Patching a class means pre-resolving it.
     // The name in the constant pool is ignored.
-    if (patch->klass() == SystemDictionary::class_klass()) { // %%% java_lang_Class::is_instance
+    if (patch->klass() == SystemDictionary::Class_klass()) { // %%% java_lang_Class::is_instance
       guarantee_property(!java_lang_Class::is_primitive(patch()),
                          "Illegal class patch at %d in class file %s",
                          index, CHECK);
@@ -643,7 +643,7 @@
       guarantee_property(value_type.is_int(), "Inconsistent constant value type in class file %s", CHECK);
       break;
     case T_OBJECT:
-      guarantee_property((cp->symbol_at(signature_index)->equals("Ljava/lang/String;", 18)
+      guarantee_property((cp->symbol_at(signature_index)->equals("Ljava/lang/String;")
                          && (value_type.is_string() || value_type.is_unresolved_string())),
                          "Bad string initial value in class file %s", CHECK);
       break;
@@ -1718,9 +1718,7 @@
   m->set_exception_table(exception_handlers());
 
   // Copy byte codes
-  if (code_length > 0) {
-    memcpy(m->code_base(), code_start, code_length);
-  }
+  m->set_code(code_start);
 
   // Copy line number table
   if (linenumber_table != NULL) {
@@ -2511,23 +2509,12 @@
       fac_ptr->nonstatic_byte_count -= 1;
       (*fields_ptr)->ushort_at_put(i + instanceKlass::signature_index_offset,
                                    word_sig_index);
-      if (wordSize == jintSize) {
-        fac_ptr->nonstatic_word_count += 1;
-      } else {
-        fac_ptr->nonstatic_double_count += 1;
-      }
-
-      FieldAllocationType atype = (FieldAllocationType) (*fields_ptr)->ushort_at(i+4);
+      fac_ptr->nonstatic_word_count += 1;
+
+      FieldAllocationType atype = (FieldAllocationType) (*fields_ptr)->ushort_at(i + instanceKlass::low_offset);
       assert(atype == NONSTATIC_BYTE, "");
       FieldAllocationType new_atype = NONSTATIC_WORD;
-      if (wordSize > jintSize) {
-        if (Universe::field_type_should_be_aligned(T_LONG)) {
-          atype = NONSTATIC_ALIGNED_DOUBLE;
-        } else {
-          atype = NONSTATIC_DOUBLE;
-        }
-      }
-      (*fields_ptr)->ushort_at_put(i+4, new_atype);
+      (*fields_ptr)->ushort_at_put(i + instanceKlass::low_offset, new_atype);
 
       found_vmentry = true;
       break;
@@ -3085,7 +3072,7 @@
     int len = fields->length();
     for (int i = 0; i < len; i += instanceKlass::next_offset) {
       int real_offset;
-      FieldAllocationType atype = (FieldAllocationType) fields->ushort_at(i+4);
+      FieldAllocationType atype = (FieldAllocationType) fields->ushort_at(i + instanceKlass::low_offset);
       switch (atype) {
         case STATIC_OOP:
           real_offset = next_static_oop_offset;
@@ -3173,8 +3160,8 @@
         default:
           ShouldNotReachHere();
       }
-      fields->short_at_put(i+4, extract_low_short_from_int(real_offset) );
-      fields->short_at_put(i+5, extract_high_short_from_int(real_offset) );
+      fields->short_at_put(i + instanceKlass::low_offset,  extract_low_short_from_int(real_offset));
+      fields->short_at_put(i + instanceKlass::high_offset, extract_high_short_from_int(real_offset));
     }
 
     // Size of instances
@@ -3482,8 +3469,8 @@
 #endif
 
   // Check if this klass supports the java.lang.Cloneable interface
-  if (SystemDictionary::cloneable_klass_loaded()) {
-    if (k->is_subtype_of(SystemDictionary::cloneable_klass())) {
+  if (SystemDictionary::Cloneable_klass_loaded()) {
+    if (k->is_subtype_of(SystemDictionary::Cloneable_klass())) {
       k->set_is_cloneable();
     }
   }
@@ -3766,8 +3753,9 @@
 }
 
 bool ClassFileParser::is_supported_version(u2 major, u2 minor) {
-  u2 max_version = JDK_Version::is_gte_jdk17x_version() ?
-    JAVA_MAX_SUPPORTED_VERSION : JAVA_6_VERSION;
+  u2 max_version =
+    JDK_Version::is_gte_jdk17x_version() ? JAVA_MAX_SUPPORTED_VERSION :
+    (JDK_Version::is_gte_jdk16x_version() ? JAVA_6_VERSION : JAVA_1_5_VERSION);
   return (major >= JAVA_MIN_SUPPORTED_VERSION) &&
          (major <= max_version) &&
          ((major != max_version) ||
@@ -4188,7 +4176,7 @@
       // Check if ch is Java identifier start or is Java identifier part
       // 4672820: call java.lang.Character methods directly without generating separate tables.
       EXCEPTION_MARK;
-      instanceKlassHandle klass (THREAD, SystemDictionary::char_klass());
+      instanceKlassHandle klass (THREAD, SystemDictionary::Character_klass());
 
       // return value
       JavaValue result(T_BOOLEAN);
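The reworked is_supported_version() above now steps the ceiling down twice instead of once. Assuming the conventional classfile major numbers (49 for 1.5 and 50 for 6; the exact constants are defined elsewhere in the parser), the effective maximum is:

// VM >= JDK 7 : max_version = JAVA_MAX_SUPPORTED_VERSION
// VM == JDK 6 : max_version = JAVA_6_VERSION     (classfile major 50)
// older VMs   : max_version = JAVA_1_5_VERSION   (classfile major 49)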
--- a/src/share/vm/classfile/classLoader.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/classfile/classLoader.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -819,7 +819,7 @@
     _package_hash_table->copy_pkgnames(packages);
   }
   // Allocate objArray and fill with java.lang.String
-  objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+  objArrayOop r = oopFactory::new_objArray(SystemDictionary::String_klass(),
                                            nof_entries, CHECK_0);
   objArrayHandle result(THREAD, r);
   for (int i = 0; i < nof_entries; i++) {
@@ -1249,6 +1249,7 @@
 }
 
 int ClassLoader::_compile_the_world_counter = 0;
+static int _codecache_sweep_counter = 0;
 
 void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
   int len = (int)strlen(name);
@@ -1293,6 +1294,13 @@
           for (int n = 0; n < k->methods()->length(); n++) {
             methodHandle m (THREAD, methodOop(k->methods()->obj_at(n)));
             if (CompilationPolicy::canBeCompiled(m)) {
+
+              if (++_codecache_sweep_counter == CompileTheWorldSafepointInterval) {
+                // Give sweeper a chance to keep up with CTW
+                VM_ForceSafepoint op;
+                VMThread::execute(&op);
+                _codecache_sweep_counter = 0;
+              }
               // Force compilation
               CompileBroker::compile_method(m, InvocationEntryBci,
                                             methodHandle(), 0, "CTW", THREAD);
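The new counter above is a periodic-maintenance pattern: every CompileTheWorldSafepointInterval compilations, CTW forces a safepoint so the sweeper can keep up and reclaim code-cache space. The pattern in isolation (a sketch; the helper stands in for building a VM_ForceSafepoint and handing it to VMThread::execute):

static int sweep_counter = 0;
static const int kInterval = 100;  // stands in for CompileTheWorldSafepointInterval

void force_safepoint();            // stands in for the VM operation

void before_each_ctw_compile() {
  if (++sweep_counter == kInterval) {
    force_safepoint();             // give the sweeper a chance to run
    sweep_counter = 0;
  }
}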
--- a/src/share/vm/classfile/javaAssertions.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/classfile/javaAssertions.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -95,14 +95,14 @@
   int len;
   typeArrayOop t;
   len = OptionList::count(_packages);
-  objArrayOop pn = oopFactory::new_objArray(SystemDictionary::string_klass(), len, CHECK_NULL);
+  objArrayOop pn = oopFactory::new_objArray(SystemDictionary::String_klass(), len, CHECK_NULL);
   objArrayHandle pkgNames (THREAD, pn);
   t = oopFactory::new_typeArray(T_BOOLEAN, len, CHECK_NULL);
   typeArrayHandle pkgEnabled(THREAD, t);
   fillJavaArrays(_packages, len, pkgNames, pkgEnabled, CHECK_NULL);
 
   len = OptionList::count(_classes);
-  objArrayOop cn = oopFactory::new_objArray(SystemDictionary::string_klass(), len, CHECK_NULL);
+  objArrayOop cn = oopFactory::new_objArray(SystemDictionary::String_klass(), len, CHECK_NULL);
   objArrayHandle classNames (THREAD, cn);
   t = oopFactory::new_typeArray(T_BOOLEAN, len, CHECK_NULL);
   typeArrayHandle classEnabled(THREAD, t);
--- a/src/share/vm/classfile/javaClasses.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/classfile/javaClasses.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -68,9 +68,9 @@
   // and the char array it points to end up in the same cache line.
   oop obj;
   if (tenured) {
-    obj = instanceKlass::cast(SystemDictionary::string_klass())->allocate_permanent_instance(CHECK_NH);
+    obj = instanceKlass::cast(SystemDictionary::String_klass())->allocate_permanent_instance(CHECK_NH);
   } else {
-    obj = instanceKlass::cast(SystemDictionary::string_klass())->allocate_instance(CHECK_NH);
+    obj = instanceKlass::cast(SystemDictionary::String_klass())->allocate_instance(CHECK_NH);
   }
 
   // Create the char array.  The String object must be handlized here
@@ -293,7 +293,7 @@
 
 bool java_lang_String::equals(oop java_string, jchar* chars, int len) {
   assert(SharedSkipVerify ||
-         java_string->klass() == SystemDictionary::string_klass(),
+         java_string->klass() == SystemDictionary::String_klass(),
          "must be java_string");
   typeArrayOop value  = java_lang_String::value(java_string);
   int          offset = java_lang_String::offset(java_string);
@@ -311,7 +311,7 @@
 
 void java_lang_String::print(Handle java_string, outputStream* st) {
   oop          obj    = java_string();
-  assert(obj->klass() == SystemDictionary::string_klass(), "must be java_string");
+  assert(obj->klass() == SystemDictionary::String_klass(), "must be java_string");
   typeArrayOop value  = java_lang_String::value(obj);
   int          offset = java_lang_String::offset(obj);
   int          length = java_lang_String::length(obj);
@@ -339,9 +339,9 @@
   // class is put into the system dictionary.
   int computed_modifiers = k->compute_modifier_flags(CHECK_0);
   k->set_modifier_flags(computed_modifiers);
-  if (SystemDictionary::class_klass_loaded()) {
+  if (SystemDictionary::Class_klass_loaded()) {
     // Allocate mirror (java.lang.Class instance)
-    Handle mirror = instanceKlass::cast(SystemDictionary::class_klass())->allocate_permanent_instance(CHECK_0);
+    Handle mirror = instanceKlass::cast(SystemDictionary::Class_klass())->allocate_permanent_instance(CHECK_0);
     // Setup indirections
     mirror->obj_field_put(klass_offset,  k());
     k->set_java_mirror(mirror());
@@ -378,7 +378,7 @@
 oop java_lang_Class::create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS) {
   // This should be improved by adding a field at the Java level or by
   // introducing a new VM klass (see comment in ClassFileParser)
-  oop java_class = instanceKlass::cast(SystemDictionary::class_klass())->allocate_permanent_instance(CHECK_0);
+  oop java_class = instanceKlass::cast(SystemDictionary::Class_klass())->allocate_permanent_instance(CHECK_0);
   if (type != T_VOID) {
     klassOop aklass = Universe::typeArrayKlassObj(type);
     assert(aklass != NULL, "correct bootstrap");
@@ -502,7 +502,7 @@
 
 oop java_lang_Class::primitive_mirror(BasicType t) {
   oop mirror = Universe::java_mirror(t);
-  assert(mirror != NULL && mirror->is_a(SystemDictionary::class_klass()), "must be a Class");
+  assert(mirror != NULL && mirror->is_a(SystemDictionary::Class_klass()), "must be a Class");
   assert(java_lang_Class::is_primitive(mirror), "must be primitive");
   return mirror;
 }
@@ -515,14 +515,14 @@
   assert(!offsets_computed, "offsets should be initialized only once");
   offsets_computed = true;
 
-  klassOop k = SystemDictionary::class_klass();
+  klassOop k = SystemDictionary::Class_klass();
   // The classRedefinedCount field is only present starting in 1.5,
   // so don't go fatal.
   compute_optional_offset(classRedefinedCount_offset,
     k, vmSymbols::classRedefinedCount_name(), vmSymbols::int_signature());
 
   // The field indicating parallelCapable (parallelLockMap) is only present starting in 7,
-  klassOop k1 = SystemDictionary::classloader_klass();
+  klassOop k1 = SystemDictionary::ClassLoader_klass();
   compute_optional_offset(parallelCapable_offset,
     k1, vmSymbols::parallelCapable_name(), vmSymbols::concurrenthashmap_signature());
 }
@@ -588,7 +588,7 @@
 void java_lang_Thread::compute_offsets() {
   assert(_group_offset == 0, "offsets should be initialized only once");
 
-  klassOop k = SystemDictionary::thread_klass();
+  klassOop k = SystemDictionary::Thread_klass();
   compute_offset(_name_offset,      k, vmSymbols::name_name(),      vmSymbols::char_array_signature());
   compute_offset(_group_offset,     k, vmSymbols::group_name(),     vmSymbols::threadgroup_signature());
   compute_offset(_contextClassLoader_offset, k, vmSymbols::contextClassLoader_name(), vmSymbols::classloader_signature());
@@ -847,7 +847,7 @@
 void java_lang_ThreadGroup::compute_offsets() {
   assert(_parent_offset == 0, "offsets should be initialized only once");
 
-  klassOop k = SystemDictionary::threadGroup_klass();
+  klassOop k = SystemDictionary::ThreadGroup_klass();
 
   compute_offset(_parent_offset,      k, vmSymbols::parent_name(),      vmSymbols::threadgroup_signature());
   compute_offset(_name_offset,        k, vmSymbols::name_name(),        vmSymbols::string_signature());
@@ -1121,11 +1121,23 @@
   }
 
   void flush() {
+    // The following appears to have been an optimization that avoided a
+    // barrier for each individual store into the _methods array by issuing
+    // a single barrier for the entire array after the series of writes.
+    // That optimization seems to have been lost when compressed oops was
+    // implemented. However, the extra card-mark below was left in place,
+    // although it is now redundant because the individual stores into the
+    // _methods array already execute the barrier code. CR 6918185 has
+    // been filed so that the original code may be restored by deferring the
+    // barriers until after the entire sequence of stores, thus re-enabling
+    // the intent of the original optimization. In the meantime the redundant
+    // card mark below is disabled.
     if (_dirty && _methods != NULL) {
+#if 0
       BarrierSet* bs = Universe::heap()->barrier_set();
       assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
-      bs->write_ref_array(MemRegion((HeapWord*)_methods->base(),
-                                    _methods->array_size()));
+      bs->write_ref_array((HeapWord*)_methods->base(), _methods->length());
+#endif
       _dirty = false;
     }
   }
@@ -1169,9 +1181,7 @@
       method = mhandle();
     }
 
-     _methods->obj_at_put(_index, method);
-    // bad for UseCompressedOops
-    // *_methods->obj_at_addr(_index) = method;
+    _methods->obj_at_put(_index, method);
     _bcis->ushort_at_put(_index, bci);
     _index++;
     _dirty = true;
@@ -1345,7 +1355,7 @@
   // No-op if stack trace is disabled
   if (!StackTraceInThrowable) return;
 
-  assert(throwable->is_a(SystemDictionary::throwable_klass()), "sanity check");
+  assert(throwable->is_a(SystemDictionary::Throwable_klass()), "sanity check");
 
   oop backtrace = java_lang_Throwable::backtrace(throwable());
   assert(backtrace != NULL, "backtrace not preallocated");
@@ -1450,7 +1460,7 @@
   assert(JDK_Version::is_gte_jdk14x_version(), "should only be called in >= 1.4");
 
   // Allocate java.lang.StackTraceElement instance
-  klassOop k = SystemDictionary::stackTraceElement_klass();
+  klassOop k = SystemDictionary::StackTraceElement_klass();
   assert(k != NULL, "must be loaded in 1.4+");
   instanceKlassHandle ik (THREAD, k);
   if (ik->should_be_initialized()) {
@@ -1488,7 +1498,7 @@
 
 
 void java_lang_reflect_AccessibleObject::compute_offsets() {
-  klassOop k = SystemDictionary::reflect_accessible_object_klass();
+  klassOop k = SystemDictionary::reflect_AccessibleObject_klass();
   compute_offset(override_offset, k, vmSymbols::override_name(), vmSymbols::bool_signature());
 }
 
@@ -1503,7 +1513,7 @@
 }
 
 void java_lang_reflect_Method::compute_offsets() {
-  klassOop k = SystemDictionary::reflect_method_klass();
+  klassOop k = SystemDictionary::reflect_Method_klass();
   compute_offset(clazz_offset,          k, vmSymbols::clazz_name(),          vmSymbols::class_signature());
   compute_offset(name_offset,           k, vmSymbols::name_name(),           vmSymbols::string_signature());
   compute_offset(returnType_offset,     k, vmSymbols::returnType_name(),     vmSymbols::class_signature());
@@ -1524,7 +1534,7 @@
 
 Handle java_lang_reflect_Method::create(TRAPS) {
   assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  klassOop klass = SystemDictionary::reflect_method_klass();
+  klassOop klass = SystemDictionary::reflect_Method_klass();
   // This class is eagerly initialized during VM initialization, since we keep a reference
   // to one of the methods
   assert(instanceKlass::cast(klass)->is_initialized(), "must be initialized");
@@ -1666,7 +1676,7 @@
 }
 
 void java_lang_reflect_Constructor::compute_offsets() {
-  klassOop k = SystemDictionary::reflect_constructor_klass();
+  klassOop k = SystemDictionary::reflect_Constructor_klass();
   compute_offset(clazz_offset,          k, vmSymbols::clazz_name(),          vmSymbols::class_signature());
   compute_offset(parameterTypes_offset, k, vmSymbols::parameterTypes_name(), vmSymbols::class_array_signature());
   compute_offset(exceptionTypes_offset, k, vmSymbols::exceptionTypes_name(), vmSymbols::class_array_signature());
@@ -1790,7 +1800,7 @@
 }
 
 void java_lang_reflect_Field::compute_offsets() {
-  klassOop k = SystemDictionary::reflect_field_klass();
+  klassOop k = SystemDictionary::reflect_Field_klass();
   compute_offset(clazz_offset,     k, vmSymbols::clazz_name(),     vmSymbols::class_signature());
   compute_offset(name_offset,      k, vmSymbols::name_name(),      vmSymbols::string_signature());
   compute_offset(type_offset,      k, vmSymbols::type_name(),      vmSymbols::class_signature());
@@ -1897,7 +1907,7 @@
 
 
 void sun_reflect_ConstantPool::compute_offsets() {
-  klassOop k = SystemDictionary::reflect_constant_pool_klass();
+  klassOop k = SystemDictionary::reflect_ConstantPool_klass();
   // This null test can be removed post beta
   if (k != NULL) {
     compute_offset(_cp_oop_offset, k, vmSymbols::constantPoolOop_name(), vmSymbols::object_signature());
@@ -1907,7 +1917,7 @@
 
 Handle sun_reflect_ConstantPool::create(TRAPS) {
   assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  klassOop k = SystemDictionary::reflect_constant_pool_klass();
+  klassOop k = SystemDictionary::reflect_ConstantPool_klass();
   instanceKlassHandle klass (THREAD, k);
   // Ensure it is initialized
   klass->initialize(CHECK_NH);
@@ -1927,7 +1937,7 @@
 }
 
 void sun_reflect_UnsafeStaticFieldAccessorImpl::compute_offsets() {
-  klassOop k = SystemDictionary::reflect_unsafe_static_field_accessor_impl_klass();
+  klassOop k = SystemDictionary::reflect_UnsafeStaticFieldAccessorImpl_klass();
   // This null test can be removed post beta
   if (k != NULL) {
     compute_offset(_base_offset, k,
@@ -2073,7 +2083,7 @@
 
 // Support for java_lang_ref_Reference
 oop java_lang_ref_Reference::pending_list_lock() {
-  instanceKlass* ik = instanceKlass::cast(SystemDictionary::reference_klass());
+  instanceKlass* ik = instanceKlass::cast(SystemDictionary::Reference_klass());
   char *addr = (((char *)ik->start_of_static_fields()) + static_lock_offset);
   if (UseCompressedOops) {
     return oopDesc::load_decode_heap_oop((narrowOop *)addr);
@@ -2083,7 +2093,7 @@
 }
 
 HeapWord *java_lang_ref_Reference::pending_list_addr() {
-  instanceKlass* ik = instanceKlass::cast(SystemDictionary::reference_klass());
+  instanceKlass* ik = instanceKlass::cast(SystemDictionary::Reference_klass());
   char *addr = (((char *)ik->start_of_static_fields()) + static_pending_offset);
   // XXX This might not be HeapWord aligned; it would arguably be better as char *.
   return (HeapWord*)addr;
@@ -2106,17 +2116,17 @@
 }
 
 jlong java_lang_ref_SoftReference::clock() {
-  instanceKlass* ik = instanceKlass::cast(SystemDictionary::soft_reference_klass());
+  instanceKlass* ik = instanceKlass::cast(SystemDictionary::SoftReference_klass());
   int offset = ik->offset_of_static_fields() + static_clock_offset;
 
-  return SystemDictionary::soft_reference_klass()->long_field(offset);
+  return SystemDictionary::SoftReference_klass()->long_field(offset);
 }
 
 void java_lang_ref_SoftReference::set_clock(jlong value) {
-  instanceKlass* ik = instanceKlass::cast(SystemDictionary::soft_reference_klass());
+  instanceKlass* ik = instanceKlass::cast(SystemDictionary::SoftReference_klass());
   int offset = ik->offset_of_static_fields() + static_clock_offset;
 
-  SystemDictionary::soft_reference_klass()->long_field_put(offset, value);
+  SystemDictionary::SoftReference_klass()->long_field_put(offset, value);
 }
 
 
@@ -2404,6 +2414,10 @@
   return ptypes(mt)->obj_at(idx);
 }
 
+int java_dyn_MethodType::ptype_count(oop mt) {
+  return ptypes(mt)->length();
+}
+
 
 
 // Support for java_dyn_MethodTypeForm
@@ -2430,15 +2444,15 @@
 }
 
 
-// Support for sun_dyn_CallSiteImpl
-
-int sun_dyn_CallSiteImpl::_type_offset;
-int sun_dyn_CallSiteImpl::_target_offset;
-int sun_dyn_CallSiteImpl::_vmmethod_offset;
-
-void sun_dyn_CallSiteImpl::compute_offsets() {
+// Support for java_dyn_CallSite
+
+int java_dyn_CallSite::_type_offset;
+int java_dyn_CallSite::_target_offset;
+int java_dyn_CallSite::_vmmethod_offset;
+
+void java_dyn_CallSite::compute_offsets() {
   if (!EnableInvokeDynamic)  return;
-  klassOop k = SystemDictionary::CallSiteImpl_klass();
+  klassOop k = SystemDictionary::CallSite_klass();
   if (k != NULL) {
     compute_offset(_type_offset,   k, vmSymbols::type_name(),   vmSymbols::java_dyn_MethodType_signature(), true);
     compute_offset(_target_offset, k, vmSymbols::target_name(), vmSymbols::java_dyn_MethodHandle_signature(), true);
@@ -2446,23 +2460,23 @@
   }
 }
 
-oop sun_dyn_CallSiteImpl::type(oop site) {
+oop java_dyn_CallSite::type(oop site) {
   return site->obj_field(_type_offset);
 }
 
-oop sun_dyn_CallSiteImpl::target(oop site) {
+oop java_dyn_CallSite::target(oop site) {
   return site->obj_field(_target_offset);
 }
 
-void sun_dyn_CallSiteImpl::set_target(oop site, oop target) {
+void java_dyn_CallSite::set_target(oop site, oop target) {
   site->obj_field_put(_target_offset, target);
 }
 
-oop sun_dyn_CallSiteImpl::vmmethod(oop site) {
+oop java_dyn_CallSite::vmmethod(oop site) {
   return site->obj_field(_vmmethod_offset);
 }
 
-void sun_dyn_CallSiteImpl::set_vmmethod(oop site, oop ref) {
+void java_dyn_CallSite::set_vmmethod(oop site, oop ref) {
   site->obj_field_put(_vmmethod_offset, ref);
 }
 
@@ -2535,7 +2549,7 @@
     // the generated bytecodes for reflection, and if so, "magically"
     // delegate to its parent to prevent class loading from occurring
     // in places where applications using reflection didn't expect it.
-    klassOop delegating_cl_class = SystemDictionary::reflect_delegating_classloader_klass();
+    klassOop delegating_cl_class = SystemDictionary::reflect_DelegatingClassLoader_klass();
     // This might be null in non-1.4 JDKs
     if (delegating_cl_class != NULL && loader->is_a(delegating_cl_class)) {
       return parent(loader);
@@ -2550,7 +2564,7 @@
 void java_lang_System::compute_offsets() {
   assert(offset_of_static_fields == 0, "offsets should be initialized only once");
 
-  instanceKlass* ik = instanceKlass::cast(SystemDictionary::system_klass());
+  instanceKlass* ik = instanceKlass::cast(SystemDictionary::System_klass());
   offset_of_static_fields = ik->offset_of_static_fields();
 }
 
@@ -2811,7 +2825,7 @@
     java_dyn_MethodTypeForm::compute_offsets();
   }
   if (EnableInvokeDynamic) {
-    sun_dyn_CallSiteImpl::compute_offsets();
+    java_dyn_CallSite::compute_offsets();
   }
   java_security_AccessControlContext::compute_offsets();
   // Initialize reflection classes. The layouts of these classes
--- a/src/share/vm/classfile/javaClasses.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/classfile/javaClasses.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -111,7 +111,7 @@
 
   // Testers
   static bool is_instance(oop obj) {
-    return obj != NULL && obj->klass() == SystemDictionary::string_klass();
+    return obj != NULL && obj->klass() == SystemDictionary::String_klass();
   }
 
   // Debugging
@@ -161,7 +161,7 @@
   static void print_signature(oop java_class, outputStream *st);
   // Testing
   static bool is_instance(oop obj) {
-    return obj != NULL && obj->klass() == SystemDictionary::class_klass();
+    return obj != NULL && obj->klass() == SystemDictionary::Class_klass();
   }
   static bool is_primitive(oop java_class);
   static BasicType primitive_type(oop java_class);
@@ -1027,6 +1027,7 @@
   static oop            form(oop mt);
 
   static oop            ptype(oop mt, int index);
+  static int            ptype_count(oop mt);
 
   static symbolOop      as_signature(oop mt, bool intern_if_not_found, TRAPS);
   static void           print_signature(oop mt, outputStream* st);
@@ -1061,9 +1062,9 @@
 };
 
 
-// Interface to sun.dyn.CallSiteImpl objects
+// Interface to java.dyn.CallSite objects
 
-class sun_dyn_CallSiteImpl: AllStatic {
+class java_dyn_CallSite: AllStatic {
   friend class JavaClasses;
 
 private:
@@ -1083,6 +1084,14 @@
   static oop            vmmethod(oop site);
   static void       set_vmmethod(oop site, oop ref);
 
+  // Testers
+  static bool is_subclass(klassOop klass) {
+    return Klass::cast(klass)->is_subclass_of(SystemDictionary::CallSite_klass());
+  }
+  static bool is_instance(oop obj) {
+    return obj != NULL && is_subclass(obj->klass());
+  }
+
   // Accessors for code generation:
   static int target_offset_in_bytes()           { return _target_offset; }
   static int type_offset_in_bytes()             { return _type_offset; }
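A short usage sketch for the new testers and accessors; call_site_target_or_null is a hypothetical helper, mirroring the (still commented-out) sanity check in systemDictionary.cpp:

    // Hypothetical helper: fetch the current target of a linked call
    // site, guarding with the new is_instance tester first.
    static oop call_site_target_or_null(oop site) {
      if (!java_dyn_CallSite::is_instance(site))  return NULL;
      return java_dyn_CallSite::target(site);
    }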
--- a/src/share/vm/classfile/loaderConstraints.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/classfile/loaderConstraints.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -430,7 +430,8 @@
 }
 
 
-void LoaderConstraintTable::verify(Dictionary* dictionary) {
+void LoaderConstraintTable::verify(Dictionary* dictionary,
+                                   PlaceholderTable* placeholders) {
   Thread *thread = Thread::current();
   for (int cindex = 0; cindex < _loader_constraint_size; cindex++) {
     for (LoaderConstraintEntry* probe = bucket(cindex);
@@ -445,7 +446,23 @@
         unsigned int d_hash = dictionary->compute_hash(name, loader);
         int d_index = dictionary->hash_to_index(d_hash);
         klassOop k = dictionary->find_class(d_index, d_hash, name, loader);
-        guarantee(k == probe->klass(), "klass should be in dictionary");
+        if (k != NULL) {
+          // We found the class in the system dictionary, so we should
+          // make sure that the klassOop matches what we already have.
+          guarantee(k == probe->klass(), "klass should be in dictionary");
+        } else {
+          // If we don't find the class in the system dictionary, it
+          // has to be in the placeholders table.
+          unsigned int p_hash = placeholders->compute_hash(name, loader);
+          int p_index = placeholders->hash_to_index(p_hash);
+          PlaceholderEntry* entry = placeholders->get_entry(p_index, p_hash,
+                                                            name, loader);
+
+          // The instanceKlass might not be on the entry, so the only
+          // thing we can check here is whether we were successful in
+          // finding the class in the placeholders table.
+          guarantee(entry != NULL, "klass should be in the placeholders");
+        }
       }
       for (int n = 0; n< probe->num_loaders(); n++) {
         guarantee(probe->loader(n)->is_oop_or_null(), "should be oop");
--- a/src/share/vm/classfile/loaderConstraints.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/classfile/loaderConstraints.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -81,7 +81,7 @@
 
   void purge_loader_constraints(BoolObjectClosure* is_alive);
 
-  void verify(Dictionary* dictionary);
+  void verify(Dictionary* dictionary, PlaceholderTable* placeholders);
 #ifndef PRODUCT
   void print();
 #endif
--- a/src/share/vm/classfile/systemDictionary.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/classfile/systemDictionary.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -60,10 +60,10 @@
 }
 
 void SystemDictionary::compute_java_system_loader(TRAPS) {
-  KlassHandle system_klass(THREAD, WK_KLASS(classloader_klass));
+  KlassHandle system_klass(THREAD, WK_KLASS(ClassLoader_klass));
   JavaValue result(T_OBJECT);
   JavaCalls::call_static(&result,
-                         KlassHandle(THREAD, WK_KLASS(classloader_klass)),
+                         KlassHandle(THREAD, WK_KLASS(ClassLoader_klass)),
                          vmSymbolHandles::getSystemClassLoader_name(),
                          vmSymbolHandles::void_classloader_signature(),
                          CHECK);
@@ -99,6 +99,15 @@
   return java_lang_Class::parallelCapable(class_loader());
 }
 // ----------------------------------------------------------------------------
+// The AllowParallelDefineClass flag does not apply to the bootstrap class loader.
+bool SystemDictionary::is_parallelDefine(Handle class_loader) {
+  if (class_loader.is_null()) return false;
+  if (AllowParallelDefineClass && java_lang_Class::parallelCapable(class_loader())) {
+    return true;
+  }
+  return false;
+}
+// ----------------------------------------------------------------------------
 // Resolving of classes
 
 // Forwards to resolve_or_null
@@ -119,7 +128,7 @@
     // in which case we have to check whether the pending exception is a ClassNotFoundException,
     // and if so convert it to a NoClassDefFoundError
     // And chain the original ClassNotFoundException
-    if (throw_error && PENDING_EXCEPTION->is_a(SystemDictionary::classNotFoundException_klass())) {
+    if (throw_error && PENDING_EXCEPTION->is_a(SystemDictionary::ClassNotFoundException_klass())) {
       ResourceMark rm(THREAD);
       assert(klass_h() == NULL, "Should not have result with exception pending");
       Handle e(THREAD, PENDING_EXCEPTION);
@@ -350,7 +359,7 @@
 
   assert(class_loader() != NULL, "should not have non-null protection domain for null classloader");
 
-  KlassHandle system_loader(THREAD, SystemDictionary::classloader_klass());
+  KlassHandle system_loader(THREAD, SystemDictionary::ClassLoader_klass());
   JavaCalls::call_special(&result,
                          class_loader,
                          system_loader,
@@ -724,17 +733,17 @@
       // Do actual loading
       k = load_instance_class(name, class_loader, THREAD);
 
-      // For UnsyncloadClass and AllowParallelDefineClass only:
+      // For UnsyncloadClass only:
       // If they got a linkageError, check if a parallel class load succeeded.
       // If it did, then for bytecode resolution the specification requires
       // that we return the same result we did for the other thread, i.e. the
       // successfully loaded instanceKlass
       // Should not get here for classloaders that support parallelism
-      // with the new cleaner mechanism
+      // with the new cleaner mechanism, even with AllowParallelDefineClass
       // Bootstrap goes through here to allow for an extra guarantee check
       if (UnsyncloadClass || (class_loader.is_null())) {
         if (k.is_null() && HAS_PENDING_EXCEPTION
-          && PENDING_EXCEPTION->is_a(SystemDictionary::linkageError_klass())) {
+          && PENDING_EXCEPTION->is_a(SystemDictionary::LinkageError_klass())) {
           MutexLocker mu(SystemDictionary_lock, THREAD);
           klassOop check = find_class(d_index, d_hash, name, class_loader);
           if (check != NULL) {
@@ -1358,7 +1367,7 @@
 
     JavaValue result(T_OBJECT);
 
-    KlassHandle spec_klass (THREAD, SystemDictionary::classloader_klass());
+    KlassHandle spec_klass (THREAD, SystemDictionary::ClassLoader_klass());
 
     // Call public unsynchronized loadClass(String) directly for all class loaders
     // for parallelCapable class loaders. JDK >=7, loadClass(String, boolean) will
@@ -1483,14 +1492,17 @@
 }
 
 // Support parallel classloading
-// Initial implementation for bootstrap classloader
-// For custom class loaders that support parallel classloading,
+// All parallel class loaders, including the bootstrap classloader,
+// lock a placeholder entry for this class/class_loader pair
+// to allow parallel defines of different classes for this class loader.
 // With AllowParallelDefine flag==true, in case they do not synchronize around
 // FindLoadedClass/DefineClass calls, we check for parallel
 // loading for them, wait if a defineClass is in progress
 // and return the initial requestor's results.
+// This flag does not apply to the bootstrap classloader.
 // With AllowParallelDefine flag==false, call through to define_instance_class
 // which will throw LinkageError: duplicate class definition.
+// The flag's requested default is false.
 // For better performance, class loaders should synchronize around
 // findClass(), i.e. FindLoadedClass/DefineClassIfAbsent calls, or they
 // potentially waste time reading and parsing the bytestream.
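A condensed sketch of the fast path the comment above describes, using names that appear later in this file (illustrative only, not the exact control flow of find_or_define_instance_class):

    // Hypothetical sketch: under SystemDictionary_lock, a thread may
    // reuse a parallel definer's result only for UnsyncloadClass or a
    // loader for which is_parallelDefine() holds; 'probe' is the
    // placeholder entry for this class/class_loader pair.
    if ((UnsyncloadClass || SystemDictionary::is_parallelDefine(class_loader))
        && probe->instanceKlass() != NULL) {
      return instanceKlassHandle(THREAD, probe->instanceKlass());
    }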
@@ -1511,9 +1523,11 @@
   {
     MutexLocker mu(SystemDictionary_lock, THREAD);
     // First check if class already defined
-    klassOop check = find_class(d_index, d_hash, name_h, class_loader);
-    if (check != NULL) {
-      return(instanceKlassHandle(THREAD, check));
+    if (UnsyncloadClass || (is_parallelDefine(class_loader))) {
+      klassOop check = find_class(d_index, d_hash, name_h, class_loader);
+      if (check != NULL) {
+        return(instanceKlassHandle(THREAD, check));
+      }
     }
 
     // Acquire define token for this class/classloader
@@ -1529,7 +1543,7 @@
     // Only special cases allow parallel defines and can use other thread's results
     // Other cases fall through, and may run into duplicate defines
     // caught by finding an entry in the SystemDictionary
-    if ((UnsyncloadClass || AllowParallelDefineClass) && (probe->instanceKlass() != NULL)) {
+    if ((UnsyncloadClass || is_parallelDefine(class_loader)) && (probe->instanceKlass() != NULL)) {
         probe->remove_seen_thread(THREAD, PlaceholderTable::DEFINE_CLASS);
         placeholders()->find_and_remove(p_index, p_hash, name_h, class_loader, THREAD);
         SystemDictionary_lock->notify_all();
@@ -1930,13 +1944,13 @@
 
 
 void SystemDictionary::initialize_preloaded_classes(TRAPS) {
-  assert(WK_KLASS(object_klass) == NULL, "preloaded classes should only be initialized once");
+  assert(WK_KLASS(Object_klass) == NULL, "preloaded classes should only be initialized once");
   // Preload commonly used klasses
   WKID scan = FIRST_WKID;
   // first do Object, String, Class
-  initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(class_klass), scan, CHECK);
+  initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(Class_klass), scan, CHECK);
 
-  debug_only(instanceKlass::verify_class_klass_nonstatic_oop_maps(WK_KLASS(class_klass)));
+  debug_only(instanceKlass::verify_class_klass_nonstatic_oop_maps(WK_KLASS(Class_klass)));
 
   // Fixup mirrors for classes loaded before java.lang.Class.
   // These calls iterate over the objects currently in the perm gen
@@ -1947,17 +1961,17 @@
   Universe::fixup_mirrors(CHECK);
 
   // do a bunch more:
-  initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(reference_klass), scan, CHECK);
+  initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(Reference_klass), scan, CHECK);
 
   // Preload ref klasses and set reference types
-  instanceKlass::cast(WK_KLASS(reference_klass))->set_reference_type(REF_OTHER);
-  instanceRefKlass::update_nonstatic_oop_maps(WK_KLASS(reference_klass));
+  instanceKlass::cast(WK_KLASS(Reference_klass))->set_reference_type(REF_OTHER);
+  instanceRefKlass::update_nonstatic_oop_maps(WK_KLASS(Reference_klass));
 
-  initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(phantom_reference_klass), scan, CHECK);
-  instanceKlass::cast(WK_KLASS(soft_reference_klass))->set_reference_type(REF_SOFT);
-  instanceKlass::cast(WK_KLASS(weak_reference_klass))->set_reference_type(REF_WEAK);
-  instanceKlass::cast(WK_KLASS(final_reference_klass))->set_reference_type(REF_FINAL);
-  instanceKlass::cast(WK_KLASS(phantom_reference_klass))->set_reference_type(REF_PHANTOM);
+  initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(PhantomReference_klass), scan, CHECK);
+  instanceKlass::cast(WK_KLASS(SoftReference_klass))->set_reference_type(REF_SOFT);
+  instanceKlass::cast(WK_KLASS(WeakReference_klass))->set_reference_type(REF_WEAK);
+  instanceKlass::cast(WK_KLASS(FinalReference_klass))->set_reference_type(REF_FINAL);
+  instanceKlass::cast(WK_KLASS(PhantomReference_klass))->set_reference_type(REF_PHANTOM);
 
   WKID meth_group_start = WK_KLASS_ENUM_NAME(MethodHandle_klass);
   WKID meth_group_end   = WK_KLASS_ENUM_NAME(WrongMethodTypeException_klass);
@@ -1970,10 +1984,10 @@
     scan = WKID(meth_group_end+1);
   }
   WKID indy_group_start = WK_KLASS_ENUM_NAME(Linkage_klass);
-  WKID indy_group_end   = WK_KLASS_ENUM_NAME(Dynamic_klass);
+  WKID indy_group_end   = WK_KLASS_ENUM_NAME(InvokeDynamic_klass);
   initialize_wk_klasses_until(indy_group_start, scan, CHECK);
   if (EnableInvokeDynamic) {
-    initialize_wk_klasses_through(indy_group_start, scan, CHECK);
+    initialize_wk_klasses_through(indy_group_end, scan, CHECK);
   }
   if (_well_known_klasses[indy_group_start] == NULL) {
     // Skip the rest of the dynamic typing classes, if Linkage is not loaded.
@@ -1982,14 +1996,14 @@
 
   initialize_wk_klasses_until(WKID_LIMIT, scan, CHECK);
 
-  _box_klasses[T_BOOLEAN] = WK_KLASS(boolean_klass);
-  _box_klasses[T_CHAR]    = WK_KLASS(char_klass);
-  _box_klasses[T_FLOAT]   = WK_KLASS(float_klass);
-  _box_klasses[T_DOUBLE]  = WK_KLASS(double_klass);
-  _box_klasses[T_BYTE]    = WK_KLASS(byte_klass);
-  _box_klasses[T_SHORT]   = WK_KLASS(short_klass);
-  _box_klasses[T_INT]     = WK_KLASS(int_klass);
-  _box_klasses[T_LONG]    = WK_KLASS(long_klass);
+  _box_klasses[T_BOOLEAN] = WK_KLASS(Boolean_klass);
+  _box_klasses[T_CHAR]    = WK_KLASS(Character_klass);
+  _box_klasses[T_FLOAT]   = WK_KLASS(Float_klass);
+  _box_klasses[T_DOUBLE]  = WK_KLASS(Double_klass);
+  _box_klasses[T_BYTE]    = WK_KLASS(Byte_klass);
+  _box_klasses[T_SHORT]   = WK_KLASS(Short_klass);
+  _box_klasses[T_INT]     = WK_KLASS(Integer_klass);
+  _box_klasses[T_LONG]    = WK_KLASS(Long_klass);
   //_box_klasses[T_OBJECT]  = WK_KLASS(object_klass);
   //_box_klasses[T_ARRAY]   = WK_KLASS(object_klass);
 
@@ -2000,11 +2014,11 @@
 #endif // KERNEL
 
   { // Compute whether we should use loadClass or loadClassInternal when loading classes.
-    methodOop method = instanceKlass::cast(classloader_klass())->find_method(vmSymbols::loadClassInternal_name(), vmSymbols::string_class_signature());
+    methodOop method = instanceKlass::cast(ClassLoader_klass())->find_method(vmSymbols::loadClassInternal_name(), vmSymbols::string_class_signature());
     _has_loadClassInternal = (method != NULL);
   }
   { // Compute whether we should use checkPackageAccess or NOT
-    methodOop method = instanceKlass::cast(classloader_klass())->find_method(vmSymbols::checkPackageAccess_name(), vmSymbols::class_protectiondomain_signature());
+    methodOop method = instanceKlass::cast(ClassLoader_klass())->find_method(vmSymbols::checkPackageAccess_name(), vmSymbols::class_protectiondomain_signature());
     _has_checkPackageAccess = (method != NULL);
   }
 }
@@ -2343,6 +2357,8 @@
   SymbolPropertyEntry* spe = invoke_method_table()->find_entry(index, hash, signature);
   if (spe == NULL || spe->property_oop() == NULL) {
     // Must create lots of stuff here, but outside of the SystemDictionary lock.
+    if (THREAD->is_Compiler_thread())
+      return NULL;              // do not attempt from within compiler
     Handle mt = compute_method_handle_type(signature(),
                                            class_loader, protection_domain,
                                            CHECK_NULL);
@@ -2375,7 +2391,7 @@
                                                     TRAPS) {
   Handle empty;
   int npts = ArgumentCount(signature()).size();
-  objArrayHandle pts = oopFactory::new_objArray(SystemDictionary::class_klass(), npts, CHECK_(empty));
+  objArrayHandle pts = oopFactory::new_objArray(SystemDictionary::Class_klass(), npts, CHECK_(empty));
   int arg = 0;
   Handle rt;                            // the return type from the signature
   for (SignatureStream ss(signature()); !ss.is_done(); ss.next()) {
@@ -2421,7 +2437,7 @@
                                                 methodHandle mh_invdyn,
                                                 TRAPS) {
   Handle empty;
-  // call sun.dyn.CallSiteImpl::makeSite(caller, name, mtype, cmid, cbci)
+  // call java.dyn.CallSite::makeSite(caller, name, mtype, cmid, cbci)
   oop name_str_oop = StringTable::intern(name(), CHECK_(empty)); // not a handle!
   JavaCallArguments args(Handle(THREAD, caller->java_mirror()));
   args.push_oop(name_str_oop);
@@ -2430,17 +2446,19 @@
   args.push_int(caller_bci);
   JavaValue result(T_OBJECT);
   JavaCalls::call_static(&result,
-                         SystemDictionary::CallSiteImpl_klass(),
+                         SystemDictionary::CallSite_klass(),
                          vmSymbols::makeSite_name(), vmSymbols::makeSite_signature(),
                          &args, CHECK_(empty));
   oop call_site_oop = (oop) result.get_jobject();
   assert(call_site_oop->is_oop()
-         /*&& sun_dyn_CallSiteImpl::is_instance(call_site_oop)*/, "must be sane");
-  sun_dyn_CallSiteImpl::set_vmmethod(call_site_oop, mh_invdyn());
+         /*&& java_dyn_CallSite::is_instance(call_site_oop)*/, "must be sane");
+  java_dyn_CallSite::set_vmmethod(call_site_oop, mh_invdyn());
   if (TraceMethodHandles) {
+#ifndef PRODUCT
     tty->print_cr("Linked invokedynamic bci=%d site="INTPTR_FORMAT":", caller_bci, call_site_oop);
     call_site_oop->print();
     tty->cr();
+#endif //PRODUCT
   }
   return call_site_oop;
 }
@@ -2453,9 +2471,17 @@
 
   instanceKlassHandle ik(THREAD, caller());
 
-  if (ik->bootstrap_method() != NULL) {
-    return Handle(THREAD, ik->bootstrap_method());
+  oop boot_method_oop = ik->bootstrap_method();
+  if (boot_method_oop != NULL) {
+    if (TraceMethodHandles) {
+      tty->print_cr("bootstrap method for "PTR_FORMAT" cached as "PTR_FORMAT":", ik(), boot_method_oop);
+    }
+    NOT_PRODUCT(if (!boot_method_oop->is_oop()) { tty->print_cr("*** boot MH of "PTR_FORMAT" = "PTR_FORMAT, ik(), boot_method_oop); ik()->print(); });
+    assert(boot_method_oop->is_oop()
+           && java_dyn_MethodHandle::is_instance(boot_method_oop), "must be sane");
+    return Handle(THREAD, boot_method_oop);
   }
+  boot_method_oop = NULL;  // GC safety
 
   // call java.dyn.Linkage::findBootstrapMethod(caller, sbk)
   JavaCallArguments args(Handle(THREAD, ik->java_mirror()));
@@ -2469,9 +2495,18 @@
                          vmSymbols::findBootstrapMethod_name(),
                          vmSymbols::findBootstrapMethod_signature(),
                          &args, CHECK_(empty));
-  oop boot_method_oop = (oop) result.get_jobject();
+  boot_method_oop = (oop) result.get_jobject();
 
   if (boot_method_oop != NULL) {
+    if (TraceMethodHandles) {
+#ifndef PRODUCT
+      tty->print_cr("--------");
+      tty->print_cr("bootstrap method for "PTR_FORMAT" computed as "PTR_FORMAT":", ik(), boot_method_oop);
+      ik()->print();
+      boot_method_oop->print();
+      tty->print_cr("========");
+#endif //PRODUCT
+    }
     assert(boot_method_oop->is_oop()
            && java_dyn_MethodHandle::is_instance(boot_method_oop), "must be sane");
     // probably no race conditions, but let's be careful:
@@ -2480,6 +2515,14 @@
     else
       boot_method_oop = ik->bootstrap_method();
   } else {
+    if (TraceMethodHandles) {
+#ifndef PRODUCT
+      tty->print_cr("--------");
+      tty->print_cr("bootstrap method for "PTR_FORMAT" computed as NULL:", ik());
+      ik()->print();
+      tty->print_cr("========");
+#endif //PRODUCT
+    }
     boot_method_oop = ik->bootstrap_method();
   }
 
@@ -2547,7 +2590,7 @@
 
   // Verify constraint table
   guarantee(constraints() != NULL, "Verify of loader constraints failed");
-  constraints()->verify(dictionary());
+  constraints()->verify(dictionary(), placeholders());
 }
 
 
--- a/src/share/vm/classfile/systemDictionary.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/classfile/systemDictionary.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -82,55 +82,55 @@
 
 #define WK_KLASSES_DO(template)                                               \
   /* well-known classes */                                                    \
-  template(object_klass,                 java_lang_Object,               Pre) \
-  template(string_klass,                 java_lang_String,               Pre) \
-  template(class_klass,                  java_lang_Class,                Pre) \
-  template(cloneable_klass,              java_lang_Cloneable,            Pre) \
-  template(classloader_klass,            java_lang_ClassLoader,          Pre) \
-  template(serializable_klass,           java_io_Serializable,           Pre) \
-  template(system_klass,                 java_lang_System,               Pre) \
-  template(throwable_klass,              java_lang_Throwable,            Pre) \
-  template(error_klass,                  java_lang_Error,                Pre) \
-  template(threaddeath_klass,            java_lang_ThreadDeath,          Pre) \
-  template(exception_klass,              java_lang_Exception,            Pre) \
-  template(runtime_exception_klass,      java_lang_RuntimeException,     Pre) \
-  template(protectionDomain_klass,       java_security_ProtectionDomain, Pre) \
+  template(Object_klass,                 java_lang_Object,               Pre) \
+  template(String_klass,                 java_lang_String,               Pre) \
+  template(Class_klass,                  java_lang_Class,                Pre) \
+  template(Cloneable_klass,              java_lang_Cloneable,            Pre) \
+  template(ClassLoader_klass,            java_lang_ClassLoader,          Pre) \
+  template(Serializable_klass,           java_io_Serializable,           Pre) \
+  template(System_klass,                 java_lang_System,               Pre) \
+  template(Throwable_klass,              java_lang_Throwable,            Pre) \
+  template(Error_klass,                  java_lang_Error,                Pre) \
+  template(ThreadDeath_klass,            java_lang_ThreadDeath,          Pre) \
+  template(Exception_klass,              java_lang_Exception,            Pre) \
+  template(RuntimeException_klass,       java_lang_RuntimeException,     Pre) \
+  template(ProtectionDomain_klass,       java_security_ProtectionDomain, Pre) \
   template(AccessControlContext_klass,   java_security_AccessControlContext, Pre) \
-  template(classNotFoundException_klass, java_lang_ClassNotFoundException, Pre) \
-  template(noClassDefFoundError_klass,   java_lang_NoClassDefFoundError, Pre) \
-  template(linkageError_klass,           java_lang_LinkageError,         Pre) \
+  template(ClassNotFoundException_klass, java_lang_ClassNotFoundException, Pre) \
+  template(NoClassDefFoundError_klass,   java_lang_NoClassDefFoundError, Pre) \
+  template(LinkageError_klass,           java_lang_LinkageError,         Pre) \
   template(ClassCastException_klass,     java_lang_ClassCastException,   Pre) \
   template(ArrayStoreException_klass,    java_lang_ArrayStoreException,  Pre) \
-  template(virtualMachineError_klass,    java_lang_VirtualMachineError,  Pre) \
+  template(VirtualMachineError_klass,    java_lang_VirtualMachineError,  Pre) \
   template(OutOfMemoryError_klass,       java_lang_OutOfMemoryError,     Pre) \
   template(StackOverflowError_klass,     java_lang_StackOverflowError,   Pre) \
   template(IllegalMonitorStateException_klass, java_lang_IllegalMonitorStateException, Pre) \
-  template(reference_klass,              java_lang_ref_Reference,        Pre) \
+  template(Reference_klass,              java_lang_ref_Reference,        Pre) \
                                                                               \
   /* Preload ref klasses and set reference types */                           \
-  template(soft_reference_klass,         java_lang_ref_SoftReference,    Pre) \
-  template(weak_reference_klass,         java_lang_ref_WeakReference,    Pre) \
-  template(final_reference_klass,        java_lang_ref_FinalReference,   Pre) \
-  template(phantom_reference_klass,      java_lang_ref_PhantomReference, Pre) \
-  template(finalizer_klass,              java_lang_ref_Finalizer,        Pre) \
+  template(SoftReference_klass,          java_lang_ref_SoftReference,    Pre) \
+  template(WeakReference_klass,          java_lang_ref_WeakReference,    Pre) \
+  template(FinalReference_klass,         java_lang_ref_FinalReference,   Pre) \
+  template(PhantomReference_klass,       java_lang_ref_PhantomReference, Pre) \
+  template(Finalizer_klass,              java_lang_ref_Finalizer,        Pre) \
                                                                               \
-  template(thread_klass,                 java_lang_Thread,               Pre) \
-  template(threadGroup_klass,            java_lang_ThreadGroup,          Pre) \
-  template(properties_klass,             java_util_Properties,           Pre) \
-  template(reflect_accessible_object_klass, java_lang_reflect_AccessibleObject, Pre) \
-  template(reflect_field_klass,          java_lang_reflect_Field,        Pre) \
-  template(reflect_method_klass,         java_lang_reflect_Method,       Pre) \
-  template(reflect_constructor_klass,    java_lang_reflect_Constructor,  Pre) \
+  template(Thread_klass,                 java_lang_Thread,               Pre) \
+  template(ThreadGroup_klass,            java_lang_ThreadGroup,          Pre) \
+  template(Properties_klass,             java_util_Properties,           Pre) \
+  template(reflect_AccessibleObject_klass, java_lang_reflect_AccessibleObject, Pre) \
+  template(reflect_Field_klass,          java_lang_reflect_Field,        Pre) \
+  template(reflect_Method_klass,         java_lang_reflect_Method,       Pre) \
+  template(reflect_Constructor_klass,    java_lang_reflect_Constructor,  Pre) \
                                                                               \
   /* NOTE: needed too early in bootstrapping process to have checks based on JDK version */ \
   /* Universe::is_gte_jdk14x_version() is not set up by this point. */        \
   /* It's okay if this turns out to be NULL in non-1.4 JDKs. */               \
-  template(reflect_magic_klass,          sun_reflect_MagicAccessorImpl,  Opt) \
-  template(reflect_method_accessor_klass, sun_reflect_MethodAccessorImpl, Opt_Only_JDK14NewRef) \
-  template(reflect_constructor_accessor_klass, sun_reflect_ConstructorAccessorImpl, Opt_Only_JDK14NewRef) \
-  template(reflect_delegating_classloader_klass, sun_reflect_DelegatingClassLoader, Opt) \
-  template(reflect_constant_pool_klass,  sun_reflect_ConstantPool,       Opt_Only_JDK15) \
-  template(reflect_unsafe_static_field_accessor_impl_klass, sun_reflect_UnsafeStaticFieldAccessorImpl, Opt_Only_JDK15) \
+  template(reflect_MagicAccessorImpl_klass,          sun_reflect_MagicAccessorImpl,  Opt) \
+  template(reflect_MethodAccessorImpl_klass, sun_reflect_MethodAccessorImpl, Opt_Only_JDK14NewRef) \
+  template(reflect_ConstructorAccessorImpl_klass, sun_reflect_ConstructorAccessorImpl, Opt_Only_JDK14NewRef) \
+  template(reflect_DelegatingClassLoader_klass, sun_reflect_DelegatingClassLoader, Opt) \
+  template(reflect_ConstantPool_klass,  sun_reflect_ConstantPool,       Opt_Only_JDK15) \
+  template(reflect_UnsafeStaticFieldAccessorImpl_klass, sun_reflect_UnsafeStaticFieldAccessorImpl, Opt_Only_JDK15) \
                                                                               \
   /* support for dynamic typing; it's OK if these are NULL in earlier JDKs */ \
   template(MethodHandle_klass,           java_dyn_MethodHandle,          Opt) \
@@ -144,16 +144,14 @@
   template(WrongMethodTypeException_klass, java_dyn_WrongMethodTypeException, Opt) \
   template(Linkage_klass,                java_dyn_Linkage,               Opt) \
   template(CallSite_klass,               java_dyn_CallSite,              Opt) \
-  template(CallSiteImpl_klass,           sun_dyn_CallSiteImpl,     Opt) \
-  template(Dynamic_klass,                java_dyn_Dynamic,               Opt) \
-  /* Note: MethodHandle must be first, and Dynamic last in group */           \
+  template(InvokeDynamic_klass,          java_dyn_InvokeDynamic,         Opt) \
+  /* Note: MethodHandle must be first, and InvokeDynamic last in group */     \
                                                                               \
-  template(vector_klass,                 java_util_Vector,               Pre) \
-  template(hashtable_klass,              java_util_Hashtable,            Pre) \
-  template(stringBuffer_klass,           java_lang_StringBuffer,         Pre) \
+  template(StringBuffer_klass,           java_lang_StringBuffer,         Pre) \
+  template(StringBuilder_klass,          java_lang_StringBuilder,        Pre) \
                                                                               \
   /* It's NULL in non-1.4 JDKs. */                                            \
-  template(stackTraceElement_klass,      java_lang_StackTraceElement,    Opt) \
+  template(StackTraceElement_klass,      java_lang_StackTraceElement,    Opt) \
   /* Universe::is_gte_jdk14x_version() is not set up by this point. */        \
   /* It's okay if this turns out to be NULL in non-1.4 JDKs. */               \
   template(java_nio_Buffer_klass,        java_nio_Buffer,                Opt) \
@@ -164,14 +162,14 @@
   template(sun_jkernel_DownloadManager_klass, sun_jkernel_DownloadManager, Opt_Kernel) \
                                                                               \
   /* Preload boxing klasses */                                                \
-  template(boolean_klass,                java_lang_Boolean,              Pre) \
-  template(char_klass,                   java_lang_Character,            Pre) \
-  template(float_klass,                  java_lang_Float,                Pre) \
-  template(double_klass,                 java_lang_Double,               Pre) \
-  template(byte_klass,                   java_lang_Byte,                 Pre) \
-  template(short_klass,                  java_lang_Short,                Pre) \
-  template(int_klass,                    java_lang_Integer,              Pre) \
-  template(long_klass,                   java_lang_Long,                 Pre) \
+  template(Boolean_klass,                java_lang_Boolean,              Pre) \
+  template(Character_klass,              java_lang_Character,            Pre) \
+  template(Float_klass,                  java_lang_Float,                Pre) \
+  template(Double_klass,                 java_lang_Double,               Pre) \
+  template(Byte_klass,                   java_lang_Byte,                 Pre) \
+  template(Short_klass,                  java_lang_Short,                Pre) \
+  template(Integer_klass,                java_lang_Integer,              Pre) \
+  template(Long_klass,                   java_lang_Long,                 Pre) \
   /*end*/
 
 
@@ -438,8 +436,8 @@
   // Tells whether ClassLoader.checkPackageAccess is present
   static bool has_checkPackageAccess()      { return _has_checkPackageAccess; }
 
-  static bool class_klass_loaded()          { return WK_KLASS(class_klass) != NULL; }
-  static bool cloneable_klass_loaded()      { return WK_KLASS(cloneable_klass) != NULL; }
+  static bool Class_klass_loaded()          { return WK_KLASS(Class_klass) != NULL; }
+  static bool Cloneable_klass_loaded()      { return WK_KLASS(Cloneable_klass) != NULL; }
 
   // Returns default system loader
   static oop java_system_loader();
@@ -578,6 +576,7 @@
   static Handle compute_loader_lock_object(Handle class_loader, TRAPS);
   static void check_loader_lock_contention(Handle loader_lock, TRAPS);
   static bool is_parallelCapable(Handle class_loader);
+  static bool is_parallelDefine(Handle class_loader);
 
   static klassOop find_shared_class(symbolHandle class_name);
 
--- a/src/share/vm/classfile/verifier.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/classfile/verifier.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -143,7 +143,7 @@
 
 bool Verifier::is_eligible_for_verification(instanceKlassHandle klass, bool should_verify_class) {
   symbolOop name = klass->name();
-  klassOop refl_magic_klass = SystemDictionary::reflect_magic_klass();
+  klassOop refl_magic_klass = SystemDictionary::reflect_MagicAccessorImpl_klass();
 
   return (should_verify_for(klass->class_loader(), should_verify_class) &&
     // return if the class is a bootstrapping class
@@ -1903,17 +1903,8 @@
   verify_cp_type(index, cp, types, CHECK_VERIFY(this));
 
   // Get method name and signature
-  symbolHandle method_name;
-  symbolHandle method_sig;
-  if (opcode == Bytecodes::_invokedynamic) {
-    int name_index = cp->name_ref_index_at(index);
-    int sig_index  = cp->signature_ref_index_at(index);
-    method_name = symbolHandle(THREAD, cp->symbol_at(name_index));
-    method_sig  = symbolHandle(THREAD, cp->symbol_at(sig_index));
-  } else {
-    method_name = symbolHandle(THREAD, cp->name_ref_at(index));
-    method_sig  = symbolHandle(THREAD, cp->signature_ref_at(index));
-  }
+  symbolHandle method_name(THREAD, cp->name_ref_at(index));
+  symbolHandle method_sig(THREAD, cp->signature_ref_at(index));
 
   if (!SignatureVerifier::is_valid_method_signature(method_sig)) {
     class_format_error(
--- a/src/share/vm/classfile/vmSymbols.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/classfile/vmSymbols.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -70,6 +70,7 @@
 void vmSymbols::initialize(TRAPS) {
   assert((int)SID_LIMIT <= (1<<log2_SID_LIMIT), "must fit in this bitfield");
   assert((int)SID_LIMIT*5 > (1<<log2_SID_LIMIT), "make the bitfield smaller, please");
+  assert(vmIntrinsics::FLAG_LIMIT <= (1 << vmIntrinsics::log2_FLAG_LIMIT), "must fit in this bitfield");
 
   if (!UseSharedSpaces) {
     const char* string = &vm_symbol_bodies[0];
@@ -271,6 +272,58 @@
   return sid;
 }
 
+static vmIntrinsics::ID wrapper_intrinsic(BasicType type, bool unboxing) {
+#define TYPE2(type, unboxing) ((int)(type)*2 + ((unboxing) ? 1 : 0))
+  switch (TYPE2(type, unboxing)) {
+#define BASIC_TYPE_CASE(type, box, unbox) \
+    case TYPE2(type, false):  return vmIntrinsics::box; \
+    case TYPE2(type, true):   return vmIntrinsics::unbox
+    BASIC_TYPE_CASE(T_BOOLEAN, _Boolean_valueOf,   _booleanValue);
+    BASIC_TYPE_CASE(T_BYTE,    _Byte_valueOf,      _byteValue);
+    BASIC_TYPE_CASE(T_CHAR,    _Character_valueOf, _charValue);
+    BASIC_TYPE_CASE(T_SHORT,   _Short_valueOf,     _shortValue);
+    BASIC_TYPE_CASE(T_INT,     _Integer_valueOf,   _intValue);
+    BASIC_TYPE_CASE(T_LONG,    _Long_valueOf,      _longValue);
+    BASIC_TYPE_CASE(T_FLOAT,   _Float_valueOf,     _floatValue);
+    BASIC_TYPE_CASE(T_DOUBLE,  _Double_valueOf,    _doubleValue);
+#undef BASIC_TYPE_CASE
+  }
+#undef TYPE2
+  return vmIntrinsics::_none;
+}
+
+vmIntrinsics::ID vmIntrinsics::for_boxing(BasicType type) {
+  return wrapper_intrinsic(type, false);
+}
+vmIntrinsics::ID vmIntrinsics::for_unboxing(BasicType type) {
+  return wrapper_intrinsic(type, true);
+}
+
+vmIntrinsics::ID vmIntrinsics::for_raw_conversion(BasicType src, BasicType dest) {
+#define SRC_DEST(s,d) (((int)(s) << 4) + (int)(d))
+  switch (SRC_DEST(src, dest)) {
+  case SRC_DEST(T_INT, T_FLOAT):   return vmIntrinsics::_intBitsToFloat;
+  case SRC_DEST(T_FLOAT, T_INT):   return vmIntrinsics::_floatToRawIntBits;
+
+  case SRC_DEST(T_LONG, T_DOUBLE): return vmIntrinsics::_longBitsToDouble;
+  case SRC_DEST(T_DOUBLE, T_LONG): return vmIntrinsics::_doubleToRawLongBits;
+  }
+#undef SRC_DEST
+
+  return vmIntrinsics::_none;
+}
+
+methodOop vmIntrinsics::method_for(vmIntrinsics::ID id) {
+  if (id == _none)  return NULL;
+  symbolOop cname = vmSymbols::symbol_at(class_for(id));
+  symbolOop mname = vmSymbols::symbol_at(name_for(id));
+  symbolOop msig  = vmSymbols::symbol_at(signature_for(id));
+  if (cname == NULL || mname == NULL || msig == NULL)  return NULL;
+  klassOop k = SystemDictionary::find_well_known_klass(cname);
+  if (k == NULL)  return NULL;
+  return instanceKlass::cast(k)->find_method(mname, msig);
+}
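Taken together, these helpers map basic types to intrinsic IDs and back to methodOops; a brief illustration with T_INT (values per the BASIC_TYPE_CASE table above; probe_int_intrinsics is a hypothetical name):

    // Hypothetical sketch of the new lookup helpers.
    static void probe_int_intrinsics() {
      vmIntrinsics::ID box   = vmIntrinsics::for_boxing(T_INT);    // _Integer_valueOf
      vmIntrinsics::ID unbox = vmIntrinsics::for_unboxing(T_INT);  // _intValue
      vmIntrinsics::ID conv  = vmIntrinsics::for_raw_conversion(T_FLOAT, T_INT);
      // conv == _floatToRawIntBits; unmapped type pairs yield _none.
      methodOop m = vmIntrinsics::method_for(box); // NULL until Integer is loaded
    }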
+
 
 #define VM_INTRINSIC_INITIALIZE(id, klass, name, sig, flags) #id "\0"
 static const char* vm_intrinsic_name_bodies =
@@ -303,6 +356,11 @@
   const int neg = JVM_ACC_STATIC | JVM_ACC_SYNCHRONIZED;
   return (flags & (req | neg)) == req;
 }
+inline bool match_F_Y(jshort flags) {
+  const int req = JVM_ACC_SYNCHRONIZED;
+  const int neg = JVM_ACC_STATIC;
+  return (flags & (req | neg)) == req;
+}
 inline bool match_F_RN(jshort flags) {
   const int req = JVM_ACC_NATIVE;
   const int neg = JVM_ACC_STATIC | JVM_ACC_SYNCHRONIZED;
@@ -325,15 +383,15 @@
 }
 
 // These are for forming case labels:
-#define ID3(x, y, z) (( jint)(z) +                                  \
-                      ((jint)(y) <<    vmSymbols::log2_SID_LIMIT) + \
-                      ((jint)(x) << (2*vmSymbols::log2_SID_LIMIT))  )
+#define ID3(x, y, z) (( jlong)(z) +                                  \
+                      ((jlong)(y) <<    vmSymbols::log2_SID_LIMIT) + \
+                      ((jlong)(x) << (2*vmSymbols::log2_SID_LIMIT))  )
 #define SID_ENUM(n) vmSymbols::VM_SYMBOL_ENUM_NAME(n)
 
-vmIntrinsics::ID vmIntrinsics::find_id(vmSymbols::SID holder,
-                                       vmSymbols::SID name,
-                                       vmSymbols::SID sig,
-                                       jshort flags) {
+vmIntrinsics::ID vmIntrinsics::find_id_impl(vmSymbols::SID holder,
+                                            vmSymbols::SID name,
+                                            vmSymbols::SID sig,
+                                            jshort flags) {
   assert((int)vmSymbols::SID_LIMIT <= (1<<vmSymbols::log2_SID_LIMIT), "must fit");
 
   // Let the C compiler build the decision tree.
@@ -361,6 +419,7 @@
   const char* sname = vmSymbols::name_for(signature_for(id));
   const char* fname = "";
   switch (flags_for(id)) {
+  case F_Y:  fname = "synchronized ";  break;
   case F_RN: fname = "native ";        break;
   case F_SN: fname = "native static "; break;
   case F_S:  fname = "static ";        break;
@@ -377,62 +436,50 @@
 }
 
 
-// These are for friendly printouts of intrinsics:
+// These are used to retrieve information about intrinsics.
+
+#define ID4(x, y, z, f) ((ID3(x, y, z) << vmIntrinsics::log2_FLAG_LIMIT) | (jlong) (f))
+
+static const jlong intrinsic_info_array[vmIntrinsics::ID_LIMIT+1] = {
+#define VM_INTRINSIC_INFO(ignore_id, klass, name, sig, fcode) \
+  ID4(SID_ENUM(klass), SID_ENUM(name), SID_ENUM(sig), vmIntrinsics::fcode),
+
+  0, VM_INTRINSICS_DO(VM_INTRINSIC_INFO,
+                     VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE)
+    0
+#undef VM_INTRINSIC_INFO
+};
+
+inline jlong intrinsic_info(vmIntrinsics::ID id) {
+  return intrinsic_info_array[vmIntrinsics::ID_from((int)id)];
+}
 
 vmSymbols::SID vmIntrinsics::class_for(vmIntrinsics::ID id) {
-#ifndef PRODUCT
-#define VM_INTRINSIC_CASE(id, klass, name, sig, fcode) \
-  case id: return SID_ENUM(klass);
-
-  switch (id) {
-    VM_INTRINSICS_DO(VM_INTRINSIC_CASE,
-                     VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE);
-  }
-#undef VM_INTRINSIC_CASE
-#endif //PRODUCT
-  return vmSymbols::NO_SID;
+  jlong info = intrinsic_info(id);
+  int shift = 2*vmSymbols::log2_SID_LIMIT + log2_FLAG_LIMIT, mask = right_n_bits(vmSymbols::log2_SID_LIMIT);
+  assert(((ID4(1021,1022,1023,15) >> shift) & mask) == 1021, "");
+  return vmSymbols::SID( (info >> shift) & mask );
 }
 
 vmSymbols::SID vmIntrinsics::name_for(vmIntrinsics::ID id) {
-#ifndef PRODUCT
-#define VM_INTRINSIC_CASE(id, klass, name, sig, fcode) \
-  case id: return SID_ENUM(name);
-
-  switch (id) {
-    VM_INTRINSICS_DO(VM_INTRINSIC_CASE,
-                     VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE);
-  }
-#undef VM_INTRINSIC_CASE
-#endif //PRODUCT
-  return vmSymbols::NO_SID;
+  jlong info = intrinsic_info(id);
+  int shift = vmSymbols::log2_SID_LIMIT + log2_FLAG_LIMIT, mask = right_n_bits(vmSymbols::log2_SID_LIMIT);
+  assert(((ID4(1021,1022,1023,15) >> shift) & mask) == 1022, "");
+  return vmSymbols::SID( (info >> shift) & mask );
 }
 
 vmSymbols::SID vmIntrinsics::signature_for(vmIntrinsics::ID id) {
-#ifndef PRODUCT
-#define VM_INTRINSIC_CASE(id, klass, name, sig, fcode) \
-  case id: return SID_ENUM(sig);
-
-  switch (id) {
-    VM_INTRINSICS_DO(VM_INTRINSIC_CASE,
-                     VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE);
-  }
-#undef VM_INTRINSIC_CASE
-#endif //PRODUCT
-  return vmSymbols::NO_SID;
+  jlong info = intrinsic_info(id);
+  int shift = log2_FLAG_LIMIT, mask = right_n_bits(vmSymbols::log2_SID_LIMIT);
+  assert(((ID4(1021,1022,1023,15) >> shift) & mask) == 1023, "");
+  return vmSymbols::SID( (info >> shift) & mask );
 }
 
 vmIntrinsics::Flags vmIntrinsics::flags_for(vmIntrinsics::ID id) {
-#ifndef PRODUCT
-#define VM_INTRINSIC_CASE(id, klass, name, sig, fcode) \
-  case id: return fcode;
-
-  switch (id) {
-    VM_INTRINSICS_DO(VM_INTRINSIC_CASE,
-                     VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE);
-  }
-#undef VM_INTRINSIC_CASE
-#endif //PRODUCT
-  return F_none;
+  jlong info = intrinsic_info(id);
+  int shift = 0, mask = right_n_bits(log2_FLAG_LIMIT);
+  assert(((ID4(1021,1022,1023,15) >> shift) & mask) == 15, "");
+  return Flags( (info >> shift) & mask );
 }
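The switch-based lookups above are replaced by one table-driven decode: ID4 packs, from high bits to low, the class SID, name SID, signature SID, and flags into a single jlong. A worked decoding, matching the shifts and the 1021/1022/1023/15 sanity asserts in the accessors (field widths are whatever log2_SID_LIMIT and log2_FLAG_LIMIT are in this build):

    // info = ID4(k, n, s, f) = (ID3(k, n, s) << log2_FLAG_LIMIT) | f,
    // where ID3 stacks three SIDs of log2_SID_LIMIT bits each. Decoding:
    //   flags     = info & right_n_bits(log2_FLAG_LIMIT)
    //   signature = (info >> log2_FLAG_LIMIT) & right_n_bits(log2_SID_LIMIT)
    //   name      = (info >> (log2_SID_LIMIT + log2_FLAG_LIMIT)) & same mask
    //   class     = (info >> (2*log2_SID_LIMIT + log2_FLAG_LIMIT)) & same mask
    // Hence ID4(1021, 1022, 1023, 15) decodes back to exactly those four
    // values, which is what each accessor's assert verifies.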
 
 
--- a/src/share/vm/classfile/vmSymbols.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/classfile/vmSymbols.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -84,6 +84,7 @@
   template(java_lang_reflect_Field,                   "java/lang/reflect/Field")                  \
   template(java_lang_reflect_Array,                   "java/lang/reflect/Array")                  \
   template(java_lang_StringBuffer,                    "java/lang/StringBuffer")                   \
+  template(java_lang_StringBuilder,                   "java/lang/StringBuilder")                  \
   template(java_lang_CharSequence,                    "java/lang/CharSequence")                   \
   template(java_security_AccessControlContext,        "java/security/AccessControlContext")       \
   template(java_security_ProtectionDomain,            "java/security/ProtectionDomain")           \
@@ -104,6 +105,7 @@
   template(java_lang_AssertionStatusDirectives,       "java/lang/AssertionStatusDirectives")      \
   template(sun_jkernel_DownloadManager,               "sun/jkernel/DownloadManager")              \
   template(getBootClassPathEntryForClass_name,        "getBootClassPathEntryForClass")            \
+  template(setBootClassLoaderHook_name,               "setBootClassLoaderHook")                   \
                                                                                                   \
   /* class file format tags */                                                                    \
   template(tag_source_file,                           "SourceFile")                               \
@@ -217,7 +219,7 @@
   template(base_name,                                 "base")                                     \
                                                                                                   \
   /* Support for JSR 292 & invokedynamic (JDK 1.7 and above) */                                   \
-  template(java_dyn_Dynamic,                          "java/dyn/Dynamic")                         \
+  template(java_dyn_InvokeDynamic,                    "java/dyn/InvokeDynamic")                   \
   template(java_dyn_Linkage,                          "java/dyn/Linkage")                         \
   template(java_dyn_CallSite,                         "java/dyn/CallSite")                        \
   template(java_dyn_MethodHandle,                     "java/dyn/MethodHandle")                    \
@@ -233,10 +235,9 @@
   template(sun_dyn_AdapterMethodHandle,               "sun/dyn/AdapterMethodHandle")              \
   template(sun_dyn_BoundMethodHandle,                 "sun/dyn/BoundMethodHandle")                \
   template(sun_dyn_DirectMethodHandle,                "sun/dyn/DirectMethodHandle")               \
-  template(sun_dyn_CallSiteImpl,                      "sun/dyn/CallSiteImpl")                     \
   template(makeImpl_name,                             "makeImpl") /*MethodType::makeImpl*/        \
   template(makeImpl_signature,    "(Ljava/lang/Class;[Ljava/lang/Class;ZZ)Ljava/dyn/MethodType;") \
-  template(makeSite_name,                             "makeSite") /*CallSiteImpl::makeImpl*/       \
+  template(makeSite_name,                             "makeSite") /*CallSite::makeSite*/          \
   template(makeSite_signature,    "(Ljava/lang/Class;Ljava/lang/String;Ljava/dyn/MethodType;II)Ljava/dyn/CallSite;") \
   template(findBootstrapMethod_name,                  "findBootstrapMethod")                      \
   template(findBootstrapMethod_signature, "(Ljava/lang/Class;Ljava/lang/Class;)Ljava/dyn/MethodHandle;") \
@@ -335,6 +336,7 @@
   template(ptypes_name,                               "ptypes")                                   \
   template(form_name,                                 "form")                                     \
   template(erasedType_name,                           "erasedType")                               \
+  template(append_name,                               "append")                                   \
                                                                                                   \
   /* non-intrinsic name/signature pairs: */                                                       \
   template(register_method_name,                      "register")                                 \
@@ -345,9 +347,14 @@
                                                                                                   \
   /* common signatures names */                                                                   \
   template(void_method_signature,                     "()V")                                      \
+  template(void_boolean_signature,                    "()Z")                                      \
+  template(void_byte_signature,                       "()B")                                      \
+  template(void_char_signature,                       "()C")                                      \
+  template(void_short_signature,                      "()S")                                      \
   template(void_int_signature,                        "()I")                                      \
   template(void_long_signature,                       "()J")                                      \
-  template(void_boolean_signature,                    "()Z")                                      \
+  template(void_float_signature,                      "()F")                                      \
+  template(void_double_signature,                     "()D")                                      \
   template(int_void_signature,                        "(I)V")                                     \
   template(int_int_signature,                         "(I)I")                                     \
   template(int_bool_signature,                        "(I)Z")                                     \
@@ -416,6 +423,13 @@
   template(string_signature,                          "Ljava/lang/String;")                                       \
   template(reference_signature,                       "Ljava/lang/ref/Reference;")                                \
   template(concurrenthashmap_signature,               "Ljava/util/concurrent/ConcurrentHashMap;")                 \
+  template(String_StringBuilder_signature,            "(Ljava/lang/String;)Ljava/lang/StringBuilder;")            \
+  template(int_StringBuilder_signature,               "(I)Ljava/lang/StringBuilder;")                             \
+  template(char_StringBuilder_signature,              "(C)Ljava/lang/StringBuilder;")                             \
+  template(String_StringBuffer_signature,             "(Ljava/lang/String;)Ljava/lang/StringBuffer;")             \
+  template(int_StringBuffer_signature,                "(I)Ljava/lang/StringBuffer;")                              \
+  template(char_StringBuffer_signature,               "(C)Ljava/lang/StringBuffer;")                              \
+  template(int_String_signature,                      "(I)Ljava/lang/String;")                                    \
   /* signature symbols needed by intrinsics */                                                                    \
   VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, template, VM_ALIAS_IGNORE)            \
                                                                                                                   \
@@ -815,12 +829,76 @@
     /*the compiler does have special inlining code for these; bytecode inline is just fine */                           \
                                                                                                                         \
   do_intrinsic(_fillInStackTrace,         java_lang_Throwable, fillInStackTrace_name, void_throwable_signature,  F_RNY) \
-                                                                                                                        \
-  do_intrinsic(_Object_init,              java_lang_Object, object_initializer_name, void_method_signature,      F_R)   \
-  /*    (symbol object_initializer_name defined above) */                                                               \
-                                                                                                                        \
+                                                                                                                          \
+  do_intrinsic(_StringBuilder_void,   java_lang_StringBuilder, object_initializer_name, void_method_signature,     F_R)   \
+  do_intrinsic(_StringBuilder_int,    java_lang_StringBuilder, object_initializer_name, int_void_signature,        F_R)   \
+  do_intrinsic(_StringBuilder_String, java_lang_StringBuilder, object_initializer_name, string_void_signature,     F_R)   \
+                                                                                                                          \
+  do_intrinsic(_StringBuilder_append_char,   java_lang_StringBuilder, append_name, char_StringBuilder_signature,   F_R)   \
+  do_intrinsic(_StringBuilder_append_int,    java_lang_StringBuilder, append_name, int_StringBuilder_signature,    F_R)   \
+  do_intrinsic(_StringBuilder_append_String, java_lang_StringBuilder, append_name, String_StringBuilder_signature, F_R)   \
+                                                                                                                          \
+  do_intrinsic(_StringBuilder_toString, java_lang_StringBuilder, toString_name, void_string_signature,             F_R)   \
+                                                                                                                          \
+  do_intrinsic(_StringBuffer_void,   java_lang_StringBuffer, object_initializer_name, void_method_signature,       F_R)   \
+  do_intrinsic(_StringBuffer_int,    java_lang_StringBuffer, object_initializer_name, int_void_signature,          F_R)   \
+  do_intrinsic(_StringBuffer_String, java_lang_StringBuffer, object_initializer_name, string_void_signature,       F_R)   \
+                                                                                                                          \
+  do_intrinsic(_StringBuffer_append_char,   java_lang_StringBuffer, append_name, char_StringBuffer_signature,      F_Y)   \
+  do_intrinsic(_StringBuffer_append_int,    java_lang_StringBuffer, append_name, int_StringBuffer_signature,       F_Y)   \
+  do_intrinsic(_StringBuffer_append_String, java_lang_StringBuffer, append_name, String_StringBuffer_signature,    F_Y)   \
+                                                                                                                          \
+  do_intrinsic(_StringBuffer_toString,  java_lang_StringBuffer, toString_name, void_string_signature,              F_Y)   \
+                                                                                                                          \
+  do_intrinsic(_Integer_toString,      java_lang_Integer, toString_name, int_String_signature,                     F_S)   \
+                                                                                                                          \
+  do_intrinsic(_String_String, java_lang_String, object_initializer_name, string_void_signature,                   F_R)   \
+                                                                                                                          \
+  do_intrinsic(_Object_init,              java_lang_Object, object_initializer_name, void_method_signature,        F_R)   \
+  /*    (symbol object_initializer_name defined above) */                                                                 \
+                                                                                                                          \
   do_intrinsic(_invoke,                   java_lang_reflect_Method, invoke_name, object_array_object_object_signature, F_R) \
   /*   (symbols invoke_name and invoke_signature defined above) */                                                      \
+  do_intrinsic(_checkSpreadArgument,      sun_dyn_MethodHandleImpl, checkSpreadArgument_name, checkSpreadArgument_signature, F_S) \
+   do_name(    checkSpreadArgument_name,       "checkSpreadArgument")                                                   \
+   do_name(    checkSpreadArgument_signature,  "(Ljava/lang/Object;I)V")                                                \
+                                                                                                                        \
+  /* unboxing methods: */                                                                                               \
+  do_intrinsic(_booleanValue,             java_lang_Boolean,      booleanValue_name, void_boolean_signature, F_R)       \
+   do_name(     booleanValue_name,       "booleanValue")                                                                \
+  do_intrinsic(_byteValue,                java_lang_Byte,         byteValue_name, void_byte_signature, F_R)             \
+   do_name(     byteValue_name,          "byteValue")                                                                   \
+  do_intrinsic(_charValue,                java_lang_Character,    charValue_name, void_char_signature, F_R)             \
+   do_name(     charValue_name,          "charValue")                                                                   \
+  do_intrinsic(_shortValue,               java_lang_Short,        shortValue_name, void_short_signature, F_R)           \
+   do_name(     shortValue_name,         "shortValue")                                                                  \
+  do_intrinsic(_intValue,                 java_lang_Integer,      intValue_name, void_int_signature, F_R)               \
+   do_name(     intValue_name,           "intValue")                                                                    \
+  do_intrinsic(_longValue,                java_lang_Long,         longValue_name, void_long_signature, F_R)             \
+   do_name(     longValue_name,          "longValue")                                                                   \
+  do_intrinsic(_floatValue,               java_lang_Float,        floatValue_name, void_float_signature, F_R)           \
+   do_name(     floatValue_name,         "floatValue")                                                                  \
+  do_intrinsic(_doubleValue,              java_lang_Double,       doubleValue_name, void_double_signature, F_R)         \
+   do_name(     doubleValue_name,        "doubleValue")                                                                 \
+                                                                                                                        \
+  /* boxing methods: */                                                                                                 \
+   do_name(    valueOf_name,              "valueOf")                                                                    \
+  do_intrinsic(_Boolean_valueOf,          java_lang_Boolean,      valueOf_name, Boolean_valueOf_signature, F_S)         \
+   do_name(     Boolean_valueOf_signature,                       "(Z)Ljava/lang/Boolean;")                              \
+  do_intrinsic(_Byte_valueOf,             java_lang_Byte,         valueOf_name, Byte_valueOf_signature, F_S)            \
+   do_name(     Byte_valueOf_signature,                          "(B)Ljava/lang/Byte;")                                 \
+  do_intrinsic(_Character_valueOf,        java_lang_Character,    valueOf_name, Character_valueOf_signature, F_S)       \
+   do_name(     Character_valueOf_signature,                     "(C)Ljava/lang/Character;")                            \
+  do_intrinsic(_Short_valueOf,            java_lang_Short,        valueOf_name, Short_valueOf_signature, F_S)           \
+   do_name(     Short_valueOf_signature,                         "(S)Ljava/lang/Short;")                                \
+  do_intrinsic(_Integer_valueOf,          java_lang_Integer,      valueOf_name, Integer_valueOf_signature, F_S)         \
+   do_name(     Integer_valueOf_signature,                       "(I)Ljava/lang/Integer;")                              \
+  do_intrinsic(_Long_valueOf,             java_lang_Long,         valueOf_name, Long_valueOf_signature, F_S)            \
+   do_name(     Long_valueOf_signature,                          "(J)Ljava/lang/Long;")                                 \
+  do_intrinsic(_Float_valueOf,            java_lang_Float,        valueOf_name, Float_valueOf_signature, F_S)           \
+   do_name(     Float_valueOf_signature,                         "(F)Ljava/lang/Float;")                                \
+  do_intrinsic(_Double_valueOf,           java_lang_Double,       valueOf_name, Double_valueOf_signature, F_S)          \
+   do_name(     Double_valueOf_signature,                        "(D)Ljava/lang/Double;")                               \
                                                                                                                         \
     /*end*/
 
@@ -946,11 +1024,17 @@
   enum Flags {
     // AccessFlags syndromes relevant to intrinsics.
     F_none = 0,
-    F_R,                        // !static        !synchronized (R="regular")
-    F_S,                        //  static        !synchronized
-    F_RN,                       // !static native !synchronized
-    F_SN,                       //  static native !synchronized
-    F_RNY                       // !static native  synchronized
+    F_R,                        // !static ?native !synchronized (R="regular")
+    F_S,                        //  static ?native !synchronized
+    F_Y,                        // !static ?native  synchronized
+    F_RN,                       // !static  native !synchronized
+    F_SN,                       //  static  native !synchronized
+    F_RNY,                      // !static  native  synchronized
+
+    FLAG_LIMIT
+  };
+  enum {
+    log2_FLAG_LIMIT = 4         // checked by an assert at start-up
   };
 
 public:
@@ -962,15 +1046,32 @@
 
   static const char* name_at(ID id);
 
+private:
+  static ID find_id_impl(vmSymbols::SID holder,
+                         vmSymbols::SID name,
+                         vmSymbols::SID sig,
+                         jshort flags);
+
+public:
   // Given a method's class, name, signature, and access flags, report its ID.
   static ID find_id(vmSymbols::SID holder,
                     vmSymbols::SID name,
                     vmSymbols::SID sig,
-                    jshort flags);
+                    jshort flags) {
+    ID id = find_id_impl(holder, name, sig, flags);
+#ifdef ASSERT
+    // ID _none does not hold the following asserts.
+    if (id == _none)  return id;
+#endif
+    assert(    class_for(id) == holder, "correct id");
+    assert(     name_for(id) == name,   "correct id");
+    assert(signature_for(id) == sig,    "correct id");
+    return id;
+  }
 
   static void verify_method(ID actual_id, methodOop m) PRODUCT_RETURN;
 
-  // No need for these in the product:
+  // Find out the symbols behind an intrinsic:
   static vmSymbols::SID     class_for(ID id);
   static vmSymbols::SID      name_for(ID id);
   static vmSymbols::SID signature_for(ID id);
@@ -980,4 +1081,11 @@
 
   // Access to intrinsic methods:
   static methodOop method_for(ID id);
+
+  // Wrapper object methods:
+  static ID for_boxing(BasicType type);
+  static ID for_unboxing(BasicType type);
+
+  // Raw conversion:
+  static ID for_raw_conversion(BasicType src, BasicType dest);
 };
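
For orientation, a minimal sketch (not part of the patch) of how the new boxing lookup declared above might be used by a caller; the surrounding code and the outcome noted in the comment are assumptions:

    // Hypothetical caller: map a primitive type to its boxing intrinsic.
    vmIntrinsics::ID box_id = vmIntrinsics::for_boxing(T_INT);
    if (box_id != vmIntrinsics::_none) {
      // Presumably resolves to _Integer_valueOf, declared above.
      methodOop boxer = vmIntrinsics::method_for(box_id);
    }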
--- a/src/share/vm/code/codeBlob.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/code/codeBlob.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -102,6 +102,9 @@
   virtual bool is_compiled_by_c2() const         { return false; }
   virtual bool is_compiled_by_c1() const         { return false; }
 
+  // Casting
+  nmethod* as_nmethod_or_null()                  { return is_nmethod() ? (nmethod*) this : NULL; }
+
   // Boundaries
   address    header_begin() const                { return (address)    this; }
   address    header_end() const                  { return ((address)   this) + _header_size; };
@@ -201,7 +204,8 @@
   virtual void print_value_on(outputStream* st) const PRODUCT_RETURN;
 
   // Print the comment associated with offset on stream, if there is one
-  void print_block_comment(outputStream* stream, intptr_t offset) {
+  virtual void print_block_comment(outputStream* stream, address block_begin) {
+    intptr_t offset = (intptr_t)(block_begin - instructions_begin());
     _comments.print_block_comment(stream, offset);
   }
 
--- a/src/share/vm/code/codeCache.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/code/codeCache.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -96,6 +96,7 @@
 int CodeCache::_number_of_nmethods_with_dependencies = 0;
 bool CodeCache::_needs_cache_clean = false;
 nmethod* CodeCache::_scavenge_root_nmethods = NULL;
+nmethod* CodeCache::_saved_nmethods = NULL;
 
 
 CodeBlob* CodeCache::first() {
@@ -395,6 +396,85 @@
 }
 #endif //PRODUCT
 
+
+nmethod* CodeCache::find_and_remove_saved_code(methodOop m) {
+  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+  nmethod* saved = _saved_nmethods;
+  nmethod* prev = NULL;
+  while (saved != NULL) {
+    if (saved->is_in_use() && saved->method() == m) {
+      if (prev != NULL) {
+        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
+      } else {
+        _saved_nmethods = saved->saved_nmethod_link();
+      }
+      assert(saved->is_speculatively_disconnected(), "shouldn't call for other nmethods");
+      saved->set_speculatively_disconnected(false);
+      saved->set_saved_nmethod_link(NULL);
+      if (PrintMethodFlushing) {
+        saved->print_on(tty, " ### nmethod is reconnected");
+      }
+      if (LogCompilation && (xtty != NULL)) {
+        ttyLocker ttyl;
+        xtty->begin_elem("nmethod_reconnected compile_id='%3d'", saved->compile_id());
+        xtty->method(methodOop(m));
+        xtty->stamp();
+        xtty->end_elem();
+      }
+      return saved;
+    }
+    prev = saved;
+    saved = saved->saved_nmethod_link();
+  }
+  return NULL;
+}
+
+void CodeCache::remove_saved_code(nmethod* nm) {
+  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+  assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
+  nmethod* saved = _saved_nmethods;
+  nmethod* prev = NULL;
+  while (saved != NULL) {
+    if (saved == nm) {
+      if (prev != NULL) {
+        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
+      } else {
+        _saved_nmethods = saved->saved_nmethod_link();
+      }
+      if (LogCompilation && (xtty != NULL)) {
+        ttyLocker ttyl;
+        xtty->begin_elem("nmethod_removed compile_id='%3d'", nm->compile_id());
+        xtty->stamp();
+        xtty->end_elem();
+      }
+      return;
+    }
+    prev = saved;
+    saved = saved->saved_nmethod_link();
+  }
+  ShouldNotReachHere();
+}
+
+void CodeCache::speculatively_disconnect(nmethod* nm) {
+  assert_locked_or_safepoint(CodeCache_lock);
+  assert(nm->is_in_use() && !nm->is_speculatively_disconnected(), "should only disconnect live nmethods");
+  nm->set_saved_nmethod_link(_saved_nmethods);
+  _saved_nmethods = nm;
+  if (PrintMethodFlushing) {
+    nm->print_on(tty, " ### nmethod is speculatively disconnected");
+  }
+  if (LogCompilation && (xtty != NULL)) {
+    ttyLocker ttyl;
+    xtty->begin_elem("nmethod_disconnected compile_id='%3d'", nm->compile_id());
+    xtty->method(methodOop(nm->method()));
+    xtty->stamp();
+    xtty->end_elem();
+  }
+  nm->method()->clear_code();
+  nm->set_speculatively_disconnected(true);
+}
+
+
 void CodeCache::gc_prologue() {
   assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
 }
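
Taken together, the three functions above implement a simple parking protocol for live nmethods. A sketch of the intended sequence (nm and m are placeholder variables; callers are assumed to hold CodeCache_lock where the asserts require it):

    // Instead of flushing a live nmethod outright, park it:
    CodeCache::speculatively_disconnect(nm);  // clears nm->method()->code(), links nm onto _saved_nmethods
    // A later compile request for the same methodOop can try to reuse it:
    nmethod* saved = CodeCache::find_and_remove_saved_code(m);
    if (saved != NULL) {
      // saved is reconnected; no recompilation is needed.
    }
    // If the sweeper flushes a disconnected nmethod first, nmethod::flush()
    // calls CodeCache::remove_saved_code(nm) to unlink it from the list.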
--- a/src/share/vm/code/codeCache.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/code/codeCache.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -46,6 +46,7 @@
   static int _number_of_nmethods_with_dependencies;
   static bool _needs_cache_clean;
   static nmethod* _scavenge_root_nmethods;  // linked via nm->scavenge_root_link()
+  static nmethod* _saved_nmethods;          // linked via nm->saved_nmethod_link()
 
   static void verify_if_often() PRODUCT_RETURN;
 
@@ -141,11 +142,16 @@
   static size_t  capacity()                      { return _heap->capacity(); }
   static size_t  max_capacity()                  { return _heap->max_capacity(); }
   static size_t  unallocated_capacity()          { return _heap->unallocated_capacity(); }
+  static bool    needs_flushing()                { return unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace; }
 
   static bool needs_cache_clean()                { return _needs_cache_clean; }
   static void set_needs_cache_clean(bool v)      { _needs_cache_clean = v;    }
   static void clear_inline_caches();             // clear all inline caches
 
+  static nmethod* find_and_remove_saved_code(methodOop m);
+  static void remove_saved_code(nmethod* nm);
+  static void speculatively_disconnect(nmethod* nm);
+
   // Deoptimization
   static int  mark_for_deoptimization(DepChange& changes);
 #ifdef HOTSWAP
--- a/src/share/vm/code/debugInfoRec.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/code/debugInfoRec.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -281,6 +281,8 @@
                                               ciMethod*   method,
                                               int         bci,
                                               bool        reexecute,
+                                              bool        is_method_handle_invoke,
+                                              bool        return_oop,
                                               DebugToken* locals,
                                               DebugToken* expressions,
                                               DebugToken* monitors) {
@@ -292,8 +294,10 @@
   int stream_offset = stream()->position();
   last_pd->set_scope_decode_offset(stream_offset);
 
-  // Record reexecute bit into pcDesc
+  // Record flags into pcDesc.
   last_pd->set_should_reexecute(reexecute);
+  last_pd->set_is_method_handle_invoke(is_method_handle_invoke);
+  last_pd->set_return_oop(return_oop);
 
   // serialize sender stream offset
   stream()->write_int(sender_stream_offset);
--- a/src/share/vm/code/debugInfoRec.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/code/debugInfoRec.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -88,6 +88,8 @@
                       ciMethod*   method,
                       int         bci,
                       bool        reexecute,
+                      bool        is_method_handle_invoke = false,
+                      bool        return_oop = false,
                       DebugToken* locals      = NULL,
                       DebugToken* expressions = NULL,
                       DebugToken* monitors    = NULL);
--- a/src/share/vm/code/dependencies.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/code/dependencies.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -843,13 +843,15 @@
     if (occasional_print || final_stats) {
       // Every now and then dump a little info about dependency searching.
       if (xtty != NULL) {
-        xtty->elem("deps_find_witness calls='%d' steps='%d' recursions='%d' singles='%d'",
+        ttyLocker ttyl;
+        xtty->elem("deps_find_witness calls='%d' steps='%d' recursions='%d' singles='%d'",
                    deps_find_witness_calls,
                    deps_find_witness_steps,
                    deps_find_witness_recursions,
                    deps_find_witness_singles);
       }
       if (final_stats || (TraceDependencies && WizardMode)) {
+        ttyLocker ttyl;
         tty->print_cr("Dependency check (find_witness) "
                       "calls=%d, steps=%d (avg=%.1f), recursions=%d, singles=%d",
                       deps_find_witness_calls,
@@ -1528,19 +1530,23 @@
   int nsup = 0, nint = 0;
   for (ContextStream str(*this); str.next(); ) {
     klassOop k = str.klass();
-    switch (str._change_type) {
+    switch (str.change_type()) {
     case Change_new_type:
       tty->print_cr("  dependee = %s", instanceKlass::cast(k)->external_name());
       break;
     case Change_new_sub:
-      if (!WizardMode)
-           ++nsup;
-      else tty->print_cr("  context super = %s", instanceKlass::cast(k)->external_name());
+      if (!WizardMode) {
+        ++nsup;
+      } else {
+        tty->print_cr("  context super = %s", instanceKlass::cast(k)->external_name());
+      }
       break;
     case Change_new_impl:
-      if (!WizardMode)
-           ++nint;
-      else tty->print_cr("  context interface = %s", instanceKlass::cast(k)->external_name());
+      if (!WizardMode) {
+        ++nint;
+      } else {
+        tty->print_cr("  context interface = %s", instanceKlass::cast(k)->external_name());
+      }
       break;
     }
   }
--- a/src/share/vm/code/dependencies.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/code/dependencies.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -470,7 +470,7 @@
 // super types can be context types for a relevant dependency, which the
 // new type could invalidate.
 class DepChange : public StackObj {
- private:
+ public:
   enum ChangeType {
     NO_CHANGE = 0,              // an uninvolved klass
     Change_new_type,            // a newly loaded type
@@ -480,6 +480,7 @@
     Start_Klass = CHANGE_LIMIT  // internal indicator for ContextStream
   };
 
+ private:
   // each change set is rooted in exactly one new type (at present):
   KlassHandle _new_type;
 
@@ -510,15 +511,15 @@
   // }
   class ContextStream : public StackObj {
    private:
-    DepChange&       _changes;
+    DepChange&  _changes;
     friend class DepChange;
 
     // iteration variables:
-    ChangeType            _change_type;
-    klassOop              _klass;
-    objArrayOop           _ti_base;    // i.e., transitive_interfaces
-    int                   _ti_index;
-    int                   _ti_limit;
+    ChangeType  _change_type;
+    klassOop    _klass;
+    objArrayOop _ti_base;    // i.e., transitive_interfaces
+    int         _ti_index;
+    int         _ti_limit;
 
     // start at the beginning:
     void start() {
@@ -530,11 +531,11 @@
       _ti_limit = 0;
     }
 
+   public:
     ContextStream(DepChange& changes)
       : _changes(changes)
     { start(); }
 
-   public:
     ContextStream(DepChange& changes, No_Safepoint_Verifier& nsv)
       : _changes(changes)
       // the nsv argument makes it safe to hold oops like _klass
@@ -542,6 +543,7 @@
 
     bool next();
 
+    ChangeType change_type()     { return _change_type; }
     klassOop   klass()           { return _klass; }
   };
   friend class DepChange::ContextStream;
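
For context, a small caller-side sketch of walking a DepChange with the now-public change_type() accessor, mirroring the print_on loop in dependencies.cpp above (changes and nsv are placeholder variables):

    No_Safepoint_Verifier nsv;
    for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
      klassOop k = str.klass();
      if (str.change_type() == DepChange::Change_new_sub) {
        // k is a context supertype of the newly loaded type.
      }
    }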
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/code/jvmticmlr.h	Wed Mar 24 17:16:33 2010 -0700
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2010 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Sun designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Sun in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * This header file defines the data structures sent by the VM
+ * through the JVMTI CompiledMethodLoad callback function via the
+ * "void * compile_info" parameter. The memory pointed to by the
+ * compile_info parameter may not be referenced after returning from
+ * the CompiledMethodLoad callback. These are VM implementation
+ * specific data structures that may evolve in future releases. A
+ * JVMTI agent should interpret a non-NULL compile_info as a pointer
+ * to a region of memory containing a list of records. In a typical
+ * usage scenario, a JVMTI agent would cast each record to a
+ * jvmtiCompiledMethodLoadRecordHeader, a struct that represents
+ * arbitrary information. This struct contains a kind field to indicate
+ * the kind of information being passed, and a pointer to the next
+ * record. If the kind field indicates inlining information, then the
+ * agent would cast the record to a jvmtiCompiledMethodLoadInlineRecord.
+ * This record contains an array of PCStackInfo structs, which indicate,
+ * for every pc address, which methods are on the invocation stack.
+ * The "methods" and "bcis" fields in each PCStackInfo struct specify a
+ * 1-1 mapping between these inlined methods and their bytecode indices.
+ * This can be used to derive the proper source lines of the inlined
+ * methods.
+ */
+
+#ifndef _JVMTI_CMLR_H_
+#define _JVMTI_CMLR_H_
+
+enum {
+    JVMTI_CMLR_MAJOR_VERSION_1 = 0x00000001,
+    JVMTI_CMLR_MINOR_VERSION_0 = 0x00000000,
+
+    JVMTI_CMLR_MAJOR_VERSION   = 0x00000001,
+    JVMTI_CMLR_MINOR_VERSION   = 0x00000000
+
+    /*
+     * This comment is for the "JDK import from HotSpot" sanity check:
+     * version: 1.0.0
+     */
+};
+
+typedef enum {
+    JVMTI_CMLR_DUMMY       = 1,
+    JVMTI_CMLR_INLINE_INFO = 2
+} jvmtiCMLRKind;
+
+/*
+ * Record that represents arbitrary information passed through JVMTI
+ * CompiledMethodLoadEvent void pointer.
+ */
+typedef struct _jvmtiCompiledMethodLoadRecordHeader {
+  jvmtiCMLRKind kind;     /* id for the kind of info passed in the record */
+  jint majorinfoversion;  /* major and minor info version values. Init'ed */
+  jint minorinfoversion;  /* to current version value in jvmtiExport.cpp. */
+
+  struct _jvmtiCompiledMethodLoadRecordHeader* next;
+} jvmtiCompiledMethodLoadRecordHeader;
+
+/*
+ * Record that gives information about the methods on the compile-time
+ * stack at a specific pc address of a compiled method. Each element in
+ * the methods array maps to the same element in the bcis array.
+ */
+typedef struct _PCStackInfo {
+  void* pc;             /* the pc address for this compiled method */
+  jint numstackframes;  /* number of methods on the stack */
+  jmethodID* methods;   /* array of numstackframes method ids */
+  jint* bcis;           /* array of numstackframes bytecode indices */
+} PCStackInfo;
+
+/*
+ * Record that contains inlining information for each pc address of
+ * an nmethod.
+ */
+typedef struct _jvmtiCompiledMethodLoadInlineRecord {
+  jvmtiCompiledMethodLoadRecordHeader header;  /* common header for casting */
+  jint numpcs;          /* number of pc descriptors in this nmethod */
+  PCStackInfo* pcinfo;  /* array of numpcs pc descriptors */
+} jvmtiCompiledMethodLoadInlineRecord;
+
+/*
+ * Dummy record used to test that we can pass records with different
+ * information through the void pointer, provided that they can be cast
+ * to a jvmtiCompiledMethodLoadRecordHeader.
+ */
+
+typedef struct _jvmtiCompiledMethodLoadDummyRecord {
+  jvmtiCompiledMethodLoadRecordHeader header;  /* common header for casting */
+  char message[50];
+} jvmtiCompiledMethodLoadDummyRecord;
+
+#endif
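
As the header comment above describes, a JVMTI agent consumes these records from its CompiledMethodLoad callback. A minimal agent-side sketch (the handler name is hypothetical; the callback signature is the standard JVMTI one):

    static void JNICALL
    my_compiled_method_load(jvmtiEnv* jvmti, jmethodID method,
                            jint code_size, const void* code_addr,
                            jint map_length, const jvmtiAddrLocationMap* map,
                            const void* compile_info) {
      const jvmtiCompiledMethodLoadRecordHeader* rec =
          (const jvmtiCompiledMethodLoadRecordHeader*) compile_info;
      for (; rec != NULL; rec = rec->next) {
        if (rec->kind == JVMTI_CMLR_INLINE_INFO) {
          const jvmtiCompiledMethodLoadInlineRecord* inl =
              (const jvmtiCompiledMethodLoadInlineRecord*) rec;
          for (jint i = 0; i < inl->numpcs; i++) {
            const PCStackInfo* info = &inl->pcinfo[i];
            /* info->methods[0..numstackframes-1] and info->bcis[...] give
               the inlined frames and bytecode indices at info->pc. */
          }
        }
      }
    }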
--- a/src/share/vm/code/nmethod.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/code/nmethod.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -56,13 +56,13 @@
 #endif
 
 bool nmethod::is_compiled_by_c1() const {
+  if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
   if (is_native_method()) return false;
-  assert(compiler() != NULL, "must be");
   return compiler()->is_c1();
 }
 bool nmethod::is_compiled_by_c2() const {
+  if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
   if (is_native_method()) return false;
-  assert(compiler() != NULL, "must be");
   return compiler()->is_c2();
 }
 
@@ -414,9 +414,8 @@
 }
 
 const char* nmethod::compile_kind() const {
-  if (method() == NULL)    return "unloaded";
-  if (is_native_method())  return "c2n";
   if (is_osr_method())     return "osr";
+  if (method() != NULL && is_native_method())  return "c2n";
   return NULL;
 }
 
@@ -588,11 +587,13 @@
     _osr_link                = NULL;
     _scavenge_root_link      = NULL;
     _scavenge_root_state     = 0;
+    _saved_nmethod_link      = NULL;
     _compiler                = NULL;
     // We have no exception handler or deopt handler; make the
     // values something that will never match a pc, like the nmethod vtable entry
     _exception_offset        = 0;
     _deoptimize_offset       = 0;
+    _deoptimize_mh_offset    = 0;
     _orig_pc_offset          = 0;
 #ifdef HAVE_DTRACE_H
     _trap_offset             = 0;
@@ -683,6 +684,7 @@
     // values something that will never match a pc, like the nmethod vtable entry
     _exception_offset        = 0;
     _deoptimize_offset       = 0;
+    _deoptimize_mh_offset    = 0;
     _trap_offset             = offsets->value(CodeOffsets::Dtrace_trap);
     _orig_pc_offset          = 0;
     _stub_offset             = data_offset();
@@ -795,6 +797,7 @@
     // Exception handler and deopt handler are in the stub section
     _exception_offset        = _stub_offset + offsets->value(CodeOffsets::Exceptions);
     _deoptimize_offset       = _stub_offset + offsets->value(CodeOffsets::Deopt);
+    _deoptimize_mh_offset    = _stub_offset + offsets->value(CodeOffsets::DeoptMH);
     _consts_offset           = instructions_offset() + code_buffer->total_offset_of(code_buffer->consts()->start());
     _scopes_data_offset      = data_offset();
     _scopes_pcs_offset       = _scopes_data_offset   + round_to(debug_info->data_size         (), oopSize);
@@ -985,7 +988,8 @@
   PcDesc* pd = pc_desc_at(pc);
   guarantee(pd != NULL, "scope must be present");
   return new ScopeDesc(this, pd->scope_decode_offset(),
-                       pd->obj_decode_offset(), pd->should_reexecute());
+                       pd->obj_decode_offset(), pd->should_reexecute(),
+                       pd->return_oop());
 }
 
 
@@ -1034,7 +1038,7 @@
         if( cb != NULL && cb->is_nmethod() ) {
           nmethod* nm = (nmethod*)cb;
           // Clean inline caches pointing to both zombie and not_entrant methods
-          if (!nm->is_in_use()) ic->set_to_clean();
+          if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean();
         }
         break;
       }
@@ -1044,7 +1048,7 @@
         if( cb != NULL && cb->is_nmethod() ) {
           nmethod* nm = (nmethod*)cb;
           // Clean inline caches pointing to both zombie and not_entrant methods
-          if (!nm->is_in_use()) csc->set_to_clean();
+          if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
         }
         break;
       }
@@ -1114,7 +1118,6 @@
     if (_method->code() == this) {
       _method->clear_code(); // Break a cycle
     }
-    inc_decompile_count();     // Last chance to make a mark on the MDO
     _method = NULL;            // Clear the method of this dead nmethod
   }
   // Make the class unloaded - i.e., change state and notify sweeper
@@ -1127,6 +1130,9 @@
   }
   flags.state = unloaded;
 
+  // Log the unloading.
+  log_state_change();
+
   // The methodOop is gone at this point
   assert(_method == NULL, "Tautology");
 
@@ -1137,8 +1143,6 @@
 
 void nmethod::invalidate_osr_method() {
   assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
-  if (_entry_bci != InvalidOSREntryBci)
-    inc_decompile_count();
   // Remove from list of active nmethods
   if (method() != NULL)
     instanceKlass::cast(method()->method_holder())->remove_osr_nmethod(this);
@@ -1146,59 +1150,65 @@
   _entry_bci = InvalidOSREntryBci;
 }
 
-void nmethod::log_state_change(int state) const {
+void nmethod::log_state_change() const {
   if (LogCompilation) {
     if (xtty != NULL) {
       ttyLocker ttyl;  // keep the following output all in one block
-      xtty->begin_elem("make_not_entrant %sthread='" UINTX_FORMAT "'",
-                       (state == zombie ? "zombie='1' " : ""),
-                       os::current_thread_id());
+      if (flags.state == unloaded) {
+        xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
+                         os::current_thread_id());
+      } else {
+        xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
+                         os::current_thread_id(),
+                         (flags.state == zombie ? " zombie='1'" : ""));
+      }
       log_identity(xtty);
       xtty->stamp();
       xtty->end_elem();
     }
   }
-  if (PrintCompilation) {
-    print_on(tty, state == zombie ? "made zombie " : "made not entrant ");
+  if (PrintCompilation && flags.state != unloaded) {
+    print_on(tty, flags.state == zombie ? "made zombie " : "made not entrant ");
     tty->cr();
   }
 }
 
 // Common functionality for both make_not_entrant and make_zombie
-void nmethod::make_not_entrant_or_zombie(int state) {
+bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
   assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
 
-  // Code for an on-stack-replacement nmethod is removed when a class gets unloaded.
-  // They never become zombie/non-entrant, so the nmethod sweeper will never remove
-  // them. Instead the entry_bci is set to InvalidOSREntryBci, so the osr nmethod
-  // will never be used anymore. That the nmethods only gets removed when class unloading
-  // happens, make life much simpler, since the nmethods are not just going to disappear
-  // out of the blue.
-  if (is_osr_method()) {
-    if (osr_entry_bci() != InvalidOSREntryBci) {
-      // only log this once
-      log_state_change(state);
-    }
-    invalidate_osr_method();
-    return;
-  }
-
-  // If the method is already zombie or set to the state we want, nothing to do
-  if (is_zombie() || (state == not_entrant && is_not_entrant())) {
-    return;
-  }
-
-  log_state_change(state);
+  bool was_alive = false;
 
   // Make sure the nmethod is not flushed in case of a safepoint in code below.
   nmethodLocker nml(this);
 
   {
+    // If the method is already zombie there is nothing to do
+    if (is_zombie()) {
+      return false;
+    }
+
+    // invalidate osr nmethod before acquiring the patching lock since
+    // they both acquire leaf locks and we don't want a deadlock.
+    // This logic is equivalent to the logic below for patching the
+    // verified entry point of regular methods.
+    if (is_osr_method()) {
+      // this effectively makes the osr nmethod not entrant
+      invalidate_osr_method();
+    }
+
     // Enter critical section.  Does not block for safepoint.
     MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
+
+    if (flags.state == state) {
+      // another thread already performed this transition so nothing
+      // to do, but return false to indicate this.
+      return false;
+    }
+
     // The caller can be calling the method statically or through an inline
     // cache call.
-    if (!is_not_entrant()) {
+    if (!is_osr_method() && !is_not_entrant()) {
       NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
                   SharedRuntime::get_handle_wrong_method_stub());
       assert (NativeJump::instruction_size == nmethod::_zombie_instruction_size, "");
@@ -1215,8 +1225,14 @@
       assert(state == not_entrant, "other cases may need to be handled differently");
     }
 
+    was_alive = is_in_use(); // Read state under lock
+
     // Change state
     flags.state = state;
+
+    // Log the transition once
+    log_state_change();
+
   } // leave critical region under Patching_lock
 
   if (state == not_entrant) {
@@ -1237,9 +1253,11 @@
     mark_as_seen_on_stack();
   }
 
-  // It's a true state change, so mark the method as decompiled.
-  inc_decompile_count();
-
+  if (was_alive) {
+    // It's a true state change, so mark the method as decompiled.
+    // Do it only for transition from alive.
+    inc_decompile_count();
+  }
 
   // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
   // and it hasn't already been reported for this nmethod then report it now.
@@ -1268,7 +1286,7 @@
 
   // Check whether method got unloaded at a safepoint before this,
   // if so we can skip the flushing steps below
-  if (method() == NULL) return;
+  if (method() == NULL) return true;
 
   // Remove nmethod from method.
   // We need to check if both the _code and _from_compiled_code_entry_point
@@ -1282,6 +1300,8 @@
     HandleMark hm;
     method()->clear_code();
   }
+
+  return true;
 }
 
 
@@ -1303,7 +1323,8 @@
   // completely deallocate this method
   EventMark m("flushing nmethod " INTPTR_FORMAT " %s", this, "");
   if (PrintMethodFlushing) {
-    tty->print_cr("*flushing nmethod " INTPTR_FORMAT ". Live blobs: %d", this, CodeCache::nof_blobs());
+    tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
+        _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024);
   }
 
   // We need to deallocate any ExceptionCache data.
@@ -1321,6 +1342,10 @@
     CodeCache::drop_scavenge_root_nmethod(this);
   }
 
+  if (is_speculatively_disconnected()) {
+    CodeCache::remove_saved_code(this);
+  }
+
   ((CodeBlob*)(this))->flush();
 
   CodeCache::free(this);
@@ -1715,9 +1740,9 @@
   if (!method()->is_native()) {
     SimpleScopeDesc ssd(this, fr.pc());
     Bytecode_invoke* call = Bytecode_invoke_at(ssd.method(), ssd.bci());
-    bool is_static = call->is_invokestatic();
+    bool has_receiver = call->has_receiver();
     symbolOop signature = call->signature();
-    fr.oops_compiled_arguments_do(signature, is_static, reg_map, f);
+    fr.oops_compiled_arguments_do(signature, has_receiver, reg_map, f);
   }
 }
 
@@ -1754,6 +1779,14 @@
          "must end with a sentinel");
 #endif //ASSERT
 
+  // Search for MethodHandle invokes and tag the nmethod.
+  for (int i = 0; i < count; i++) {
+    if (pcs[i].is_method_handle_invoke()) {
+      set_has_method_handle_invokes(true);
+      break;
+    }
+  }
+
   int size = count * sizeof(PcDesc);
   assert(scopes_pcs_size() >= size, "oob");
   memcpy(scopes_pcs_begin(), pcs, size);
@@ -1978,7 +2011,10 @@
     print_pcs();
   }
 #endif
-  guarantee(cont_offset != 0, "unhandled implicit exception in compiled code");
+  if (cont_offset == 0) {
+    // Let the normal error handling report the exception
+    return NULL;
+  }
   return instructions_begin() + cont_offset;
 }
 
@@ -2014,9 +2050,33 @@
   guarantee(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");
 }
 
-bool nmethod::is_deopt_pc(address pc) {
-  bool ret =  pc == deopt_handler_begin();
-  return ret;
+
+// -----------------------------------------------------------------------------
+// nmethod::get_deopt_original_pc
+//
+// Return the original PC for the given PC if:
+// (a) the given PC belongs to a nmethod and
+// (b) it is a deopt PC
+address nmethod::get_deopt_original_pc(const frame* fr) {
+  if (fr->cb() == NULL)  return NULL;
+
+  nmethod* nm = fr->cb()->as_nmethod_or_null();
+  if (nm != NULL && nm->is_deopt_pc(fr->pc()))
+    return nm->get_original_pc(fr);
+
+  return NULL;
+}
+
+
+// -----------------------------------------------------------------------------
+// MethodHandle
+
+bool nmethod::is_method_handle_return(address return_pc) {
+  if (!has_method_handle_invokes())  return false;
+  PcDesc* pd = pc_desc_at(return_pc);
+  if (pd == NULL)
+    return false;
+  return pd->is_method_handle_invoke();
 }
 
 
@@ -2100,7 +2160,8 @@
   PcDesc* pd = pc_desc_at(ic->end_of_call());
   assert(pd != NULL, "PcDesc must exist");
   for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),
-                                     pd->obj_decode_offset(), pd->should_reexecute());
+                                     pd->obj_decode_offset(), pd->should_reexecute(),
+                                     pd->return_oop());
        !sd->is_top(); sd = sd->sender()) {
     sd->verify();
   }
@@ -2365,11 +2426,115 @@
   PcDesc* p = pc_desc_near(begin+1);
   if (p != NULL && p->real_pc(this) <= end) {
     return new ScopeDesc(this, p->scope_decode_offset(),
-                         p->obj_decode_offset(), p->should_reexecute());
+                         p->obj_decode_offset(), p->should_reexecute(),
+                         p->return_oop());
   }
   return NULL;
 }
 
+void nmethod::print_nmethod_labels(outputStream* stream, address block_begin) {
+  if (block_begin == entry_point())             stream->print_cr("[Entry Point]");
+  if (block_begin == verified_entry_point())    stream->print_cr("[Verified Entry Point]");
+  if (block_begin == exception_begin())         stream->print_cr("[Exception Handler]");
+  if (block_begin == stub_begin())              stream->print_cr("[Stub Code]");
+  if (block_begin == deopt_handler_begin())     stream->print_cr("[Deopt Handler Code]");
+  if (block_begin == deopt_mh_handler_begin())  stream->print_cr("[Deopt MH Handler Code]");
+  if (block_begin == consts_begin())            stream->print_cr("[Constants]");
+  if (block_begin == entry_point()) {
+    methodHandle m = method();
+    if (m.not_null()) {
+      stream->print("  # ");
+      m->print_value_on(stream);
+      stream->cr();
+    }
+    if (m.not_null() && !is_osr_method()) {
+      ResourceMark rm;
+      int sizeargs = m->size_of_parameters();
+      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
+      VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
+      {
+        int sig_index = 0;
+        if (!m->is_static())
+          sig_bt[sig_index++] = T_OBJECT; // 'this'
+        for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {
+          BasicType t = ss.type();
+          sig_bt[sig_index++] = t;
+          if (type2size[t] == 2) {
+            sig_bt[sig_index++] = T_VOID;
+          } else {
+            assert(type2size[t] == 1, "size is 1 or 2");
+          }
+        }
+        assert(sig_index == sizeargs, "");
+      }
+      const char* spname = "sp"; // make arch-specific?
+      intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs, false);
+      int stack_slot_offset = this->frame_size() * wordSize;
+      int tab1 = 14, tab2 = 24;
+      int sig_index = 0;
+      int arg_index = (m->is_static() ? 0 : -1);
+      bool did_old_sp = false;
+      for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {
+        bool at_this = (arg_index == -1);
+        bool at_old_sp = false;
+        BasicType t = (at_this ? T_OBJECT : ss.type());
+        assert(t == sig_bt[sig_index], "sigs in sync");
+        if (at_this)
+          stream->print("  # this: ");
+        else
+          stream->print("  # parm%d: ", arg_index);
+        stream->move_to(tab1);
+        VMReg fst = regs[sig_index].first();
+        VMReg snd = regs[sig_index].second();
+        if (fst->is_reg()) {
+          stream->print("%s", fst->name());
+          if (snd->is_valid())  {
+            stream->print(":%s", snd->name());
+          }
+        } else if (fst->is_stack()) {
+          int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset;
+          if (offset == stack_slot_offset)  at_old_sp = true;
+          stream->print("[%s+0x%x]", spname, offset);
+        } else {
+          stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd);
+        }
+        stream->print(" ");
+        stream->move_to(tab2);
+        stream->print("= ");
+        if (at_this) {
+          m->method_holder()->print_value_on(stream);
+        } else {
+          bool did_name = false;
+          if (!at_this && ss.is_object()) {
+            symbolOop name = ss.as_symbol_or_null();
+            if (name != NULL) {
+              name->print_value_on(stream);
+              did_name = true;
+            }
+          }
+          if (!did_name)
+            stream->print("%s", type2name(t));
+        }
+        if (at_old_sp) {
+          stream->print("  (%s of caller)", spname);
+          did_old_sp = true;
+        }
+        stream->cr();
+        sig_index += type2size[t];
+        arg_index += 1;
+        if (!at_this)  ss.next();
+      }
+      if (!did_old_sp) {
+        stream->print("  # ");
+        stream->move_to(tab1);
+        stream->print("[%s+0x%x]", spname, stack_slot_offset);
+        stream->print("  (%s of caller)", spname);
+        stream->cr();
+      }
+    }
+  }
+}
+
 void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin, u_char* end) {
   // First, find an oopmap in (begin, end].
   // We use the odd half-closed interval so that oop maps and scope descs
--- a/src/share/vm/code/nmethod.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/code/nmethod.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -81,18 +81,21 @@
 
 struct nmFlags {
   friend class VMStructs;
-  unsigned int version:8;                 // version number (0 = first version)
-  unsigned int level:4;                   // optimization level
-  unsigned int age:4;                     // age (in # of sweep steps)
+  unsigned int version:8;                    // version number (0 = first version)
+  unsigned int level:4;                      // optimization level
+  unsigned int age:4;                        // age (in # of sweep steps)
 
-  unsigned int state:2;                   // {alive, zombie, unloaded)
+  unsigned int state:2;                      // {alive, zombie, unloaded}
 
-  unsigned int isUncommonRecompiled:1;    // recompiled because of uncommon trap?
-  unsigned int isToBeRecompiled:1;        // to be recompiled as soon as it matures
-  unsigned int hasFlushedDependencies:1;  // Used for maintenance of dependencies
-  unsigned int markedForReclamation:1;    // Used by NMethodSweeper
+  unsigned int isUncommonRecompiled:1;       // recompiled because of uncommon trap?
+  unsigned int isToBeRecompiled:1;           // to be recompiled as soon as it matures
+  unsigned int hasFlushedDependencies:1;     // Used for maintenance of dependencies
+  unsigned int markedForReclamation:1;       // Used by NMethodSweeper
 
-  unsigned int has_unsafe_access:1;       // May fault due to unsafe access.
+  unsigned int has_unsafe_access:1;          // May fault due to unsafe access.
+  unsigned int has_method_handle_invokes:1;  // Does this method have MethodHandle invokes?
+
+  unsigned int speculatively_disconnected:1; // Marked for potential unload
 
   void clear();
 };
@@ -136,6 +139,7 @@
   // To support simple linked-list chaining of nmethods:
   nmethod*  _osr_link;         // from instanceKlass::osr_nmethods_head
   nmethod*  _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
+  nmethod*  _saved_nmethod_link; // from CodeCache::speculatively_disconnect
 
   static nmethod* volatile _oops_do_mark_nmethods;
   nmethod*        volatile _oops_do_mark_link;
@@ -144,8 +148,12 @@
 
   // Offsets for different nmethod parts
   int _exception_offset;
-  // All deoptee's will resume execution at this location described by this offset
+  // All deoptees will resume execution at the location described by
+  // this offset.
   int _deoptimize_offset;
+  // All deoptees at a MethodHandle call site will resume execution
+  // at the location described by this offset.
+  int _deoptimize_mh_offset;
 #ifdef HAVE_DTRACE_H
   int _trap_offset;
 #endif // def HAVE_DTRACE_H
@@ -252,7 +260,9 @@
   void* operator new(size_t size, int nmethod_size);
 
   const char* reloc_string_for(u_char* begin, u_char* end);
-  void make_not_entrant_or_zombie(int state);
+  // Returns true if this thread changed the state of the nmethod or
+  // false if another thread performed the transition.
+  bool make_not_entrant_or_zombie(unsigned int state);
   void inc_decompile_count();
 
   // used to check that writes to nmFlags are done consistently.
@@ -326,24 +336,25 @@
   bool is_compiled_by_c2() const;
 
   // boundaries for different parts
-  address code_begin         () const             { return _entry_point; }
-  address code_end           () const             { return           header_begin() + _stub_offset          ; }
-  address exception_begin    () const             { return           header_begin() + _exception_offset     ; }
-  address deopt_handler_begin() const             { return           header_begin() + _deoptimize_offset    ; }
-  address stub_begin         () const             { return           header_begin() + _stub_offset          ; }
-  address stub_end           () const             { return           header_begin() + _consts_offset        ; }
-  address consts_begin       () const             { return           header_begin() + _consts_offset        ; }
-  address consts_end         () const             { return           header_begin() + _scopes_data_offset   ; }
-  address scopes_data_begin  () const             { return           header_begin() + _scopes_data_offset   ; }
-  address scopes_data_end    () const             { return           header_begin() + _scopes_pcs_offset    ; }
-  PcDesc* scopes_pcs_begin   () const             { return (PcDesc*)(header_begin() + _scopes_pcs_offset   ); }
-  PcDesc* scopes_pcs_end     () const             { return (PcDesc*)(header_begin() + _dependencies_offset); }
-  address dependencies_begin () const             { return           header_begin() + _dependencies_offset ; }
-  address dependencies_end   () const             { return           header_begin() + _handler_table_offset ; }
-  address handler_table_begin() const             { return           header_begin() + _handler_table_offset ; }
-  address handler_table_end  () const             { return           header_begin() + _nul_chk_table_offset   ; }
-  address nul_chk_table_begin() const             { return           header_begin() + _nul_chk_table_offset ; }
-  address nul_chk_table_end  () const             { return           header_begin() + _nmethod_end_offset   ; }
+  address code_begin            () const          { return _entry_point; }
+  address code_end              () const          { return           header_begin() + _stub_offset          ; }
+  address exception_begin       () const          { return           header_begin() + _exception_offset     ; }
+  address deopt_handler_begin   () const          { return           header_begin() + _deoptimize_offset    ; }
+  address deopt_mh_handler_begin() const          { return           header_begin() + _deoptimize_mh_offset ; }
+  address stub_begin            () const          { return           header_begin() + _stub_offset          ; }
+  address stub_end              () const          { return           header_begin() + _consts_offset        ; }
+  address consts_begin          () const          { return           header_begin() + _consts_offset        ; }
+  address consts_end            () const          { return           header_begin() + _scopes_data_offset   ; }
+  address scopes_data_begin     () const          { return           header_begin() + _scopes_data_offset   ; }
+  address scopes_data_end       () const          { return           header_begin() + _scopes_pcs_offset    ; }
+  PcDesc* scopes_pcs_begin      () const          { return (PcDesc*)(header_begin() + _scopes_pcs_offset   ); }
+  PcDesc* scopes_pcs_end        () const          { return (PcDesc*)(header_begin() + _dependencies_offset) ; }
+  address dependencies_begin    () const          { return           header_begin() + _dependencies_offset  ; }
+  address dependencies_end      () const          { return           header_begin() + _handler_table_offset ; }
+  address handler_table_begin   () const          { return           header_begin() + _handler_table_offset ; }
+  address handler_table_end     () const          { return           header_begin() + _nul_chk_table_offset ; }
+  address nul_chk_table_begin   () const          { return           header_begin() + _nul_chk_table_offset ; }
+  address nul_chk_table_end     () const          { return           header_begin() + _nmethod_end_offset   ; }
 
   int code_size         () const                  { return      code_end         () -      code_begin         (); }
   int stub_size         () const                  { return      stub_end         () -      stub_begin         (); }
@@ -375,10 +386,12 @@
   bool  is_zombie() const                         { return flags.state == zombie; }
   bool  is_unloaded() const                       { return flags.state == unloaded;   }
 
-  // Make the nmethod non entrant. The nmethod will continue to be alive.
-  // It is used when an uncommon trap happens.
-  void  make_not_entrant()                        { make_not_entrant_or_zombie(not_entrant); }
-  void  make_zombie()                             { make_not_entrant_or_zombie(zombie); }
+  // Make the nmethod non-entrant. The nmethod will continue to be
+  // alive.  It is used when an uncommon trap happens.  Returns true
+  // if this thread changed the state of the nmethod or false if
+  // another thread performed the transition.
+  bool  make_not_entrant()                        { return make_not_entrant_or_zombie(not_entrant); }
+  bool  make_zombie()                             { return make_not_entrant_or_zombie(zombie); }
 
   // used by jvmti to track if the unload event has been reported
   bool  unload_reported()                         { return _unload_reported; }
@@ -405,6 +418,12 @@
   bool  has_unsafe_access() const                 { return flags.has_unsafe_access; }
   void  set_has_unsafe_access(bool z)             { flags.has_unsafe_access = z; }
 
+  bool  has_method_handle_invokes() const         { return flags.has_method_handle_invokes; }
+  void  set_has_method_handle_invokes(bool z)     { flags.has_method_handle_invokes = z; }
+
+  bool  is_speculatively_disconnected() const     { return flags.speculatively_disconnected; }
+  void  set_speculatively_disconnected(bool z)    { flags.speculatively_disconnected = z; }
+
   int   level() const                             { return flags.level; }
   void  set_level(int newLevel)                   { check_safepoint(); flags.level = newLevel; }
 
@@ -429,6 +448,9 @@
   nmethod* scavenge_root_link() const                  { return _scavenge_root_link; }
   void     set_scavenge_root_link(nmethod *n)          { _scavenge_root_link = n; }
 
+  nmethod* saved_nmethod_link() const                  { return _saved_nmethod_link; }
+  void     set_saved_nmethod_link(nmethod *n)          { _saved_nmethod_link = n; }
+
  public:
 
   // Sweeper support
@@ -507,7 +529,7 @@
  private:
   ScopeDesc* scope_desc_in(address begin, address end);
 
-  address* orig_pc_addr(const frame* fr ) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }
+  address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }
 
   PcDesc* find_pc_desc_internal(address pc, bool approximate);
 
@@ -530,13 +552,20 @@
   void copy_scopes_pcs(PcDesc* pcs, int count);
   void copy_scopes_data(address buffer, int size);
 
-  // deopt
-  // return true is the pc is one would expect if the frame is being deopted.
-  bool is_deopt_pc(address pc);
+  // Deopt
+  // Return true if the PC is one we would expect if the frame is being deopted.
+  bool is_deopt_pc      (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
+  bool is_deopt_entry   (address pc) { return pc == deopt_handler_begin(); }
+  bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
   // Accessor/mutator for the original pc of a frame before a frame was deopted.
   address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
   void    set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }
 
+  static address get_deopt_original_pc(const frame* fr);
+
+  // MethodHandle
+  bool is_method_handle_return(address return_pc);
+
   // jvmti support:
   void post_compiled_method_load_event();
 
@@ -563,7 +592,14 @@
   // Logging
   void log_identity(xmlStream* log) const;
   void log_new_nmethod() const;
-  void log_state_change(int state) const;
+  void log_state_change() const;
+
+  // Prints block-level comments, including nmethod-specific block labels:
+  virtual void print_block_comment(outputStream* stream, address block_begin) {
+    print_nmethod_labels(stream, block_begin);
+    CodeBlob::print_block_comment(stream, block_begin);
+  }
+  void print_nmethod_labels(outputStream* stream, address block_begin);
 
   // Prints a comment for one native instruction (reloc info, pc desc)
   void print_code_comment_on(outputStream* st, int column, address begin, address end);
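
The hunks above change make_not_entrant()/make_zombie() to report whether the
calling thread performed the state transition. Below is a minimal sketch of
that contract, assuming a simple atomic state word; HotSpot serializes the
real transition differently (e.g. under the Patching_lock), so this is
illustrative only:

    #include <atomic>

    enum NMStateSketch { in_use = 0, not_entrant = 1, zombie = 2 };

    static std::atomic<int> nm_state{in_use};

    // Returns true iff this thread performed the transition; false if
    // another thread already moved the state at least as far.
    static bool transition_to(int target) {
      int cur = nm_state.load();
      while (cur < target) {
        if (nm_state.compare_exchange_weak(cur, target)) {
          return true;    // this thread changed the state
        }
        // on failure, cur was reloaded; the loop re-checks it
      }
      return false;       // another thread got there first
    }
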
--- a/src/share/vm/code/pcDesc.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/code/pcDesc.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,8 @@
     tty->print("  ");
     sd->method()->print_short_name(tty);
     tty->print("  @%d", sd->bci());
-    tty->print("  reexecute=%s", sd->should_reexecute()?"true":"false");
+    if (sd->should_reexecute())
+      tty->print("  reexecute=true");
     tty->cr();
   }
 #endif
--- a/src/share/vm/code/pcDesc.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/code/pcDesc.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -38,6 +38,8 @@
     int word;
     struct {
       unsigned int reexecute: 1;
+      unsigned int is_method_handle_invoke: 1;
+      unsigned int return_oop: 1;
     } bits;
     bool operator ==(const PcDescFlags& other) { return word == other.word; }
   } _flags;
@@ -72,6 +74,12 @@
       _flags == pd->_flags;
   }
 
+  bool     is_method_handle_invoke()       const { return _flags.bits.is_method_handle_invoke;     }
+  void set_is_method_handle_invoke(bool z)       {        _flags.bits.is_method_handle_invoke = z; }
+
+  bool     return_oop()                    const { return _flags.bits.return_oop;     }
+  void set_return_oop(bool z)                    {        _flags.bits.return_oop = z; }
+
   // Returns the real pc
   address real_pc(const nmethod* code) const;
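
The PcDescFlags union above gains two new bits next to reexecute; because all
the flags share one word, operator== can compare every flag with a single
integer comparison. A standalone sketch of the idiom, assuming the word is
zero-initialized first so unused padding bits compare equal (type-punning
through the union is the existing HotSpot style here, not strictly portable
C++):

    union FlagsSketch {
      int word;
      struct {
        unsigned int reexecute: 1;
        unsigned int is_method_handle_invoke: 1;
        unsigned int return_oop: 1;
      } bits;
    };

    int main() {
      FlagsSketch a, b;
      a.word = 0;                          // clear all bits, padding included
      b.word = 0;
      a.bits.return_oop = 1;
      b.bits.return_oop = 1;
      return (a.word == b.word) ? 0 : 1;   // one compare covers every flag
    }
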
 
--- a/src/share/vm/code/scopeDesc.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/code/scopeDesc.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -26,19 +26,21 @@
 # include "incls/_scopeDesc.cpp.incl"
 
 
-ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset, bool reexecute) {
+ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset, bool reexecute, bool return_oop) {
   _code          = code;
   _decode_offset = decode_offset;
   _objects       = decode_object_values(obj_decode_offset);
   _reexecute     = reexecute;
+  _return_oop    = return_oop;
   decode_body();
 }
 
-ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, bool reexecute) {
+ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, bool reexecute, bool return_oop) {
   _code          = code;
   _decode_offset = decode_offset;
   _objects       = decode_object_values(DebugInformationRecorder::serialized_null);
   _reexecute     = reexecute;
+  _return_oop    = return_oop;
   decode_body();
 }
 
@@ -48,6 +50,7 @@
   _decode_offset = parent->_sender_decode_offset;
   _objects       = parent->_objects;
   _reexecute     = false; //reexecute only applies to the first scope
+  _return_oop    = false;
   decode_body();
 }
 
--- a/src/share/vm/code/scopeDesc.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/code/scopeDesc.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -52,17 +52,18 @@
 class ScopeDesc : public ResourceObj {
  public:
   // Constructor
-  ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset, bool reexecute);
+  ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset, bool reexecute, bool return_oop);
 
   // Calls above, giving default value of "serialized_null" to the
   // "obj_decode_offset" argument.  (We don't use a default argument to
   // avoid a .hpp-.hpp dependency.)
-  ScopeDesc(const nmethod* code, int decode_offset, bool reexecute);
+  ScopeDesc(const nmethod* code, int decode_offset, bool reexecute, bool return_oop);
 
   // JVM state
   methodHandle method()   const { return _method; }
   int          bci()      const { return _bci;    }
   bool should_reexecute() const { return _reexecute; }
+  bool return_oop()       const { return _return_oop; }
 
   GrowableArray<ScopeValue*>*   locals();
   GrowableArray<ScopeValue*>*   expressions();
@@ -88,6 +89,7 @@
   methodHandle  _method;
   int           _bci;
   bool          _reexecute;
+  bool          _return_oop;
 
   // Decoding offsets
   int _decode_offset;
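
As the scopeDesc.cpp hunk above shows, return_oop follows the same rule as
reexecute: it is meaningful only for the innermost scope, and the sender-scope
constructor resets both flags to false rather than inheriting them. A toy
illustration of that rule (invented type, not the HotSpot class):

    struct ScopeSketch {
      bool reexecute;
      bool return_oop;
      // innermost scope: flags come from the recorded debug info
      ScopeSketch(bool re, bool oop) : reexecute(re), return_oop(oop) {}
      // sender scope: neither flag propagates up the inlining chain
      explicit ScopeSketch(const ScopeSketch& /*inner*/)
        : reexecute(false), return_oop(false) {}
    };
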
--- a/src/share/vm/compiler/compileBroker.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/compiler/compileBroker.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -69,6 +69,7 @@
 
 bool CompileBroker::_initialized = false;
 volatile bool CompileBroker::_should_block = false;
+volatile jint CompileBroker::_should_compile_new_jobs = run_compilation;
 
 // The installed compiler(s)
 AbstractCompiler* CompileBroker::_compilers[2];
@@ -986,6 +987,13 @@
       return method_code;
     }
     if (method->is_not_compilable(comp_level)) return NULL;
+
+    nmethod* saved = CodeCache::find_and_remove_saved_code(method());
+    if (saved != NULL) {
+      method->set_code(method, saved);
+      return saved;
+    }
+
   } else {
     // osr compilation
 #ifndef TIERED
@@ -1037,6 +1045,14 @@
     method->jmethod_id();
   }
 
+  // If the compiler is shut off due to code cache flushing or otherwise,
+  // fail out now so blocking compiles don't hang the Java thread.
+  if (!should_compile_new_jobs() || (UseCodeCacheFlushing && CodeCache::needs_flushing())) {
+    method->invocation_counter()->decay();
+    method->backedge_counter()->decay();
+    return NULL;
+  }
+
   // do the compilation
   if (method->is_native()) {
     if (!PreferInterpreterNativeStubs) {
@@ -1116,7 +1132,7 @@
   // the specified level
   if (is_native &&
       (!CICompileNatives || !compiler(comp_level)->supports_native())) {
-    method->set_not_compilable();
+    method->set_not_compilable_quietly();
     return true;
   }
 
@@ -1140,7 +1156,7 @@
       method->print_short_name(tty);
       tty->cr();
     }
-    method->set_not_compilable();
+    method->set_not_compilable_quietly();
   }
 
   return false;
@@ -1173,7 +1189,7 @@
   }
 
   // Method was not in the appropriate compilation range.
-  method->set_not_compilable();
+  method->set_not_compilable_quietly();
   return 0;
 }
 
@@ -1325,26 +1341,13 @@
     {
       // We need this HandleMark to avoid leaking VM handles.
       HandleMark hm(thread);
+
       if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
-        // The CodeCache is full.  Print out warning and disable compilation.
-        UseInterpreter = true;
-        if (UseCompiler || AlwaysCompileLoopMethods ) {
-          if (log != NULL) {
-            log->begin_elem("code_cache_full");
-            log->stamp();
-            log->end_elem();
-          }
-#ifndef PRODUCT
-          warning("CodeCache is full. Compiler has been disabled");
-          if (CompileTheWorld || ExitOnFullCodeCache) {
-            before_exit(thread);
-            exit_globals(); // will delete tty
-            vm_direct_exit(CompileTheWorld ? 0 : 1);
-          }
-#endif
-          UseCompiler               = false;
-          AlwaysCompileLoopMethods  = false;
-        }
+        // The code cache is genuinely full, not merely approaching capacity.
+        handle_full_code_cache();
+      } else if (UseCodeCacheFlushing && CodeCache::needs_flushing()) {
+        // Attempt to start cleaning the code cache while there is still a little headroom
+        NMethodSweeper::handle_full_code_cache(false);
       }
 
       CompileTask* task = queue->get();
@@ -1369,7 +1372,7 @@
       // Never compile a method if breakpoints are present in it
       if (method()->number_of_breakpoints() == 0) {
         // Compile the method.
-        if (UseCompiler || AlwaysCompileLoopMethods) {
+        if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) {
 #ifdef COMPILER1
           // Allow repeating compilations for the purpose of benchmarking
           // compile speed. This is not useful for customers.
@@ -1587,10 +1590,10 @@
     if (is_osr) {
       method->set_not_osr_compilable();
     } else {
-      method->set_not_compilable();
+      method->set_not_compilable_quietly();
     }
   } else if (compilable == ciEnv::MethodCompilable_not_at_tier) {
-    method->set_not_compilable(task->comp_level());
+    method->set_not_compilable_quietly(task->comp_level());
   }
 
   // Note that the queued_for_compilation bits are cleared without
@@ -1614,6 +1617,38 @@
 
 
 // ------------------------------------------------------------------
+// CompileBroker::handle_full_code_cache
+//
+// The CodeCache is full.  Print a warning and either disable compilation
+// or start code cache cleaning so compilation can continue later.
+void CompileBroker::handle_full_code_cache() {
+  UseInterpreter = true;
+  if (UseCompiler || AlwaysCompileLoopMethods) {
+    CompilerThread* thread = CompilerThread::current();
+    CompileLog* log = thread->log();
+    if (log != NULL) {
+      log->begin_elem("code_cache_full");
+      log->stamp();
+      log->end_elem();
+    }
+  #ifndef PRODUCT
+    warning("CodeCache is full. Compiler has been disabled");
+    if (CompileTheWorld || ExitOnFullCodeCache) {
+      before_exit(JavaThread::current());
+      exit_globals(); // will delete tty
+      vm_direct_exit(CompileTheWorld ? 0 : 1);
+    }
+  #endif
+    if (UseCodeCacheFlushing) {
+      NMethodSweeper::handle_full_code_cache(true);
+    } else {
+      UseCompiler               = false;
+      AlwaysCompileLoopMethods  = false;
+    }
+  }
+}
+
+// ------------------------------------------------------------------
 // CompileBroker::set_last_compile
 //
 // Record this compilation for debugging purposes.
@@ -1820,9 +1855,11 @@
                 CompileBroker::_t_standard_compilation.seconds(),
                 CompileBroker::_t_standard_compilation.seconds() / CompileBroker::_total_standard_compile_count);
   tty->print_cr("    On stack replacement   : %6.3f s, Average : %2.3f", CompileBroker::_t_osr_compilation.seconds(), CompileBroker::_t_osr_compilation.seconds() / CompileBroker::_total_osr_compile_count);
-  compiler(CompLevel_fast_compile)->print_timers();
-  if (compiler(CompLevel_fast_compile) != compiler(CompLevel_highest_tier)) {
-    compiler(CompLevel_highest_tier)->print_timers();
+
+  if (compiler(CompLevel_fast_compile)) {
+    compiler(CompLevel_fast_compile)->print_timers();
+    if (compiler(CompLevel_fast_compile) != compiler(CompLevel_highest_tier))
+      compiler(CompLevel_highest_tier)->print_timers();
   }
 
   tty->cr();
--- a/src/share/vm/compiler/compileBroker.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/compiler/compileBroker.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -193,6 +193,9 @@
   static bool _initialized;
   static volatile bool _should_block;
 
+  // This flag can be used to stop compilation or turn it back on
+  static volatile jint _should_compile_new_jobs;
+
   // The installed compiler(s)
   static AbstractCompiler* _compilers[2];
 
@@ -319,6 +322,7 @@
 
   static void compiler_thread_loop();
 
+  static uint get_compilation_id() { return _compilation_id; }
   static bool is_idle();
 
   // Set _should_block.
@@ -328,6 +332,20 @@
   // Call this from the compiler at convenient points, to poll for _should_block.
   static void maybe_block();
 
+  enum {
+    // Flags for toggling compiler activity
+    stop_compilation = 0,
+    run_compilation  = 1
+  };
+
+  static bool should_compile_new_jobs() { return UseCompiler && (_should_compile_new_jobs == run_compilation); }
+  static bool set_should_compile_new_jobs(jint new_state) {
+    // Return true only for the caller whose cmpxchg performed the transition
+    jint old = Atomic::cmpxchg(new_state, &_should_compile_new_jobs, 1-new_state);
+    return (old == (1-new_state));
+  }
+  static void handle_full_code_cache();
+
   // Return total compilation ticks
   static jlong total_compilation_ticks() {
     return _perf_total_compilation != NULL ? _perf_total_compilation->get_value() : 0;
--- a/src/share/vm/compiler/compilerOracle.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/compiler/compilerOracle.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -392,18 +392,18 @@
 };
 
 static MethodMatcher::Mode check_mode(char name[], const char*& error_msg) {
-  if (strcmp(name, "*") == 0) return MethodMatcher::Any;
-
   int match = MethodMatcher::Exact;
-  if (name[0] == '*') {
+  while (name[0] == '*') {
     match |= MethodMatcher::Suffix;
     strcpy(name, name + 1);
   }
 
+  if (strcmp(name, "*") == 0) return MethodMatcher::Any;
+
   size_t len = strlen(name);
-  if (len > 0 && name[len - 1] == '*') {
+  while (len > 0 && name[len - 1] == '*') {
     match |= MethodMatcher::Prefix;
-    name[len - 1] = '\0';
+    name[--len] = '\0';
   }
 
   if (strstr(name, "*") != NULL) {
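
Turning the two ifs into while loops makes check_mode() tolerant of repeated
wildcards, and moving the bare-star test after the stripping loop changes how
it is reached. Roughly, and as a sketch rather than an exhaustive spec, the
resulting modes are:

    //   "foo"      -> Exact
    //   "*foo"     -> Suffix                  (leading '*' stripped)
    //   "foo*"     -> Prefix                  (trailing '*' stripped)
    //   "*foo*"    -> Suffix | Prefix         (substring match)
    //   "**foo**"  -> same as "*foo*"; the loops strip the repeats
    //   "*"        -> Suffix with an empty name, effectively matching anything
    //   "f*o"      -> still caught by the embedded-'*' strstr() check
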
@@ -610,6 +610,14 @@
   CompilerOracle::parse_from_string(CompileCommand, CompilerOracle::parse_from_line);
   CompilerOracle::parse_from_string(CompileOnly, CompilerOracle::parse_compile_only);
   CompilerOracle::parse_from_file();
+  if (lists[PrintCommand] != NULL) {
+    if (PrintAssembly) {
+      warning("CompileCommand and/or .hotspot_compiler file contains 'print' commands, but PrintAssembly is also enabled");
+    } else if (FLAG_IS_DEFAULT(DebugNonSafepoints)) {
+      warning("printing of assembly code is enabled; turning on DebugNonSafepoints to gain additional output");
+      DebugNonSafepoints = true;
+    }
+  }
 }
 
 
--- a/src/share/vm/compiler/disassembler.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/compiler/disassembler.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -151,8 +151,10 @@
     outputStream* st = output();
     if (_print_bytes && pc > pc0)
       print_insn_bytes(pc0, pc);
-    if (_nm != NULL)
+    if (_nm != NULL) {
       _nm->print_code_comment_on(st, COMMENT_COLUMN, pc0, pc);
+      // This calls reloc_string_for(), which in turn calls oop::print_value_on().
+    }
 
     // Output pc bucket ticks if we have any
     if (total_ticks() != 0) {
@@ -273,8 +275,15 @@
     oop obj;
     if (_nm != NULL
         && (obj = _nm->embeddedOop_at(cur_insn())) != NULL
-        && (address) obj == adr) {
+        && (address) obj == adr
+        && Universe::heap()->is_in(obj)
+        && Universe::heap()->is_in(obj->klass())) {
+      julong c = st->count();
       obj->print_value_on(st);
+      if (st->count() == c) {
+        // No output.  (Can happen in product builds.)
+        st->print("(a %s)", Klass::cast(obj->klass())->external_name());
+      }
       return;
     }
   }
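
The st->count() bookkeeping above detects whether print_value_on() actually
emitted anything and, if not (possible in product builds), falls back to
printing the klass name. The same idiom in isolation, sketched with standard
streams rather than HotSpot's outputStream:

    #include <functional>
    #include <ostream>
    #include <sstream>

    // Run 'printer'; if it produced no output, emit 'fallback' instead.
    static void print_or_fallback(std::ostringstream& st,
                                  const std::function<void(std::ostream&)>& printer,
                                  const char* fallback) {
      const std::streampos before = st.tellp();
      printer(st);
      if (st.tellp() == before) {
        st << fallback;               // printer was a no-op in this build
      }
    }
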
@@ -286,17 +295,9 @@
 void decode_env::print_insn_labels() {
   address p = cur_insn();
   outputStream* st = output();
-  nmethod* nm = _nm;
-  if (nm != NULL) {
-    if (p == nm->entry_point())             st->print_cr("[Entry Point]");
-    if (p == nm->verified_entry_point())    st->print_cr("[Verified Entry Point]");
-    if (p == nm->exception_begin())         st->print_cr("[Exception Handler]");
-    if (p == nm->stub_begin())              st->print_cr("[Stub Code]");
-    if (p == nm->consts_begin())            st->print_cr("[Constants]");
-  }
   CodeBlob* cb = _code;
   if (cb != NULL) {
-    cb->print_block_comment(st, (intptr_t)(p - cb->instructions_begin()));
+    cb->print_block_comment(st, p);
   }
   if (_print_pc) {
     st->print("  " INTPTR_FORMAT ": ", (intptr_t) p);
--- a/src/share/vm/compiler/methodLiveness.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/compiler/methodLiveness.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -782,6 +782,7 @@
     case Bytecodes::_invokespecial:
     case Bytecodes::_invokestatic:
     case Bytecodes::_invokeinterface:
+    case Bytecodes::_invokedynamic:
     case Bytecodes::_newarray:
     case Bytecodes::_anewarray:
     case Bytecodes::_checkcast:
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -62,12 +62,13 @@
   tl->link_head(tc);
   tl->link_tail(tc);
   tl->set_count(1);
-  tl->init_statistics();
+  tl->init_statistics(true /* split_birth */);
   tl->setParent(NULL);
   tl->setLeft(NULL);
   tl->setRight(NULL);
   return tl;
 }
+
 TreeList* TreeList::as_TreeList(HeapWord* addr, size_t size) {
   TreeChunk* tc = (TreeChunk*) addr;
   assert(size >= sizeof(TreeChunk), "Chunk is too small for a TreeChunk");
@@ -267,6 +268,31 @@
   return retTC;
 }
 
+// Returns the block with the largest heap address amongst
+// those in the list for this size; potentially slow and expensive,
+// use with caution!
+TreeChunk* TreeList::largest_address() {
+  guarantee(head() != NULL, "The head of the list cannot be NULL");
+  FreeChunk* fc = head()->next();
+  TreeChunk* retTC;
+  if (fc == NULL) {
+    retTC = head_as_TreeChunk();
+  } else {
+    // walk down the list and return the one with the highest
+    // heap address among chunks of this size.
+    FreeChunk* last = fc;
+    while (fc != NULL) {  // include the tail chunk in the comparison
+      if ((HeapWord*)last < (HeapWord*)fc) {
+        last = fc;
+      }
+      fc = fc->next();
+    }
+    retTC = TreeChunk::as_TreeChunk(last);
+  }
+  assert(retTC->list() == this, "Wrong type of chunk.");
+  return retTC;
+}
+
 BinaryTreeDictionary::BinaryTreeDictionary(MemRegion mr, bool splay):
   _splay(splay)
 {
@@ -379,7 +405,7 @@
             break;
           }
           // The evm code reset the hint of the candidate as
-          // at an interrim point.  Why?  Seems like this leaves
+          // at an interim point.  Why?  Seems like this leaves
           // the hint pointing to a list that didn't work.
           // curTL->set_hint(hintTL->size());
         }
@@ -436,7 +462,7 @@
   TreeList *curTL = root();
   if (curTL != NULL) {
     while(curTL->right() != NULL) curTL = curTL->right();
-    return curTL->first_available();
+    return curTL->largest_address();
   } else {
     return NULL;
   }
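
findLargestDict() now performs two walks: down the rightmost spine of the
size-keyed tree to reach the largest size class, then along that class's
chunk list via largest_address(). A stripped-down sketch of the first walk,
with an invented node type rather than the real TreeList:

    #include <cstddef>

    struct NodeSketch {
      size_t      size;   // size class this node represents
      NodeSketch* right;  // larger size classes
    };

    // Largest size class == rightmost node of the binary search tree.
    static NodeSketch* rightmost(NodeSketch* root) {
      if (root == NULL) return NULL;
      while (root->right != NULL) root = root->right;
      return root;
    }
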
@@ -664,7 +690,7 @@
     }
   }
   TreeChunk* tc = TreeChunk::as_TreeChunk(fc);
-  // This chunk is being returned to the binary try.  It's embedded
+  // This chunk is being returned to the binary tree.  Its embedded
   // TreeList should be unused at this point.
   tc->initialize();
   if (curTL != NULL) {          // exact match
@@ -807,6 +833,8 @@
 }
 
 bool BinaryTreeDictionary::coalDictOverPopulated(size_t size) {
+  if (FLSAlwaysCoalesceLarge) return true;
+
   TreeList* list_of_size = findList(size);
   // None of requested size implies overpopulated.
   return list_of_size == NULL || list_of_size->coalDesired() <= 0 ||
@@ -854,17 +882,20 @@
   double _percentage;
   float _inter_sweep_current;
   float _inter_sweep_estimate;
+  float _intra_sweep_estimate;
 
  public:
   BeginSweepClosure(double p, float inter_sweep_current,
-                              float inter_sweep_estimate) :
+                              float inter_sweep_estimate,
+                              float intra_sweep_estimate) :
    _percentage(p),
    _inter_sweep_current(inter_sweep_current),
-   _inter_sweep_estimate(inter_sweep_estimate) { }
+   _inter_sweep_estimate(inter_sweep_estimate),
+   _intra_sweep_estimate(intra_sweep_estimate) { }
 
   void do_list(FreeList* fl) {
     double coalSurplusPercent = _percentage;
-    fl->compute_desired(_inter_sweep_current, _inter_sweep_estimate);
+    fl->compute_desired(_inter_sweep_current, _inter_sweep_estimate, _intra_sweep_estimate);
     fl->set_coalDesired((ssize_t)((double)fl->desired() * coalSurplusPercent));
     fl->set_beforeSweep(fl->count());
     fl->set_bfrSurp(fl->surplus());
@@ -939,9 +970,10 @@
 }
 
 void BinaryTreeDictionary::beginSweepDictCensus(double coalSurplusPercent,
-  float inter_sweep_current, float inter_sweep_estimate) {
+  float inter_sweep_current, float inter_sweep_estimate, float intra_sweep_estimate) {
   BeginSweepClosure bsc(coalSurplusPercent, inter_sweep_current,
-                                            inter_sweep_estimate);
+                                            inter_sweep_estimate,
+                                            intra_sweep_estimate);
   bsc.do_tree(root());
 }
 
@@ -1077,13 +1109,13 @@
 // Print census information - counts, births, deaths, etc.
 // for each list in the tree.  Also print some summary
 // information.
-class printTreeCensusClosure : public AscendTreeCensusClosure {
+class PrintTreeCensusClosure : public AscendTreeCensusClosure {
   int _print_line;
   size_t _totalFree;
   FreeList _total;
 
  public:
-  printTreeCensusClosure() {
+  PrintTreeCensusClosure() {
     _print_line = 0;
     _totalFree = 0;
   }
@@ -1113,7 +1145,7 @@
 
   gclog_or_tty->print("\nBinaryTree\n");
   FreeList::print_labels_on(gclog_or_tty, "size");
-  printTreeCensusClosure ptc;
+  PrintTreeCensusClosure ptc;
   ptc.do_tree(root());
 
   FreeList* total = ptc.total();
@@ -1130,6 +1162,38 @@
              /(total->desired() != 0 ? (double)total->desired() : 1.0));
 }
 
+class PrintFreeListsClosure : public AscendTreeCensusClosure {
+  outputStream* _st;
+  int _print_line;
+
+ public:
+  PrintFreeListsClosure(outputStream* st) {
+    _st = st;
+    _print_line = 0;
+  }
+  void do_list(FreeList* fl) {
+    if (++_print_line >= 40) {
+      FreeList::print_labels_on(_st, "size");
+      _print_line = 0;
+    }
+    fl->print_on(_st);  // honor the stream this closure was constructed with
+    size_t sz = fl->size();
+    for (FreeChunk* fc = fl->head(); fc != NULL;
+         fc = fc->next()) {
+      _st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
+                    fc, (HeapWord*)fc + sz,
+                    fc->cantCoalesce() ? "\t CC" : "");
+    }
+  }
+};
+
+void BinaryTreeDictionary::print_free_lists(outputStream* st) const {
+
+  FreeList::print_labels_on(st, "size");
+  PrintFreeListsClosure pflc(st);
+  pflc.do_tree(root());
+}
+
 // Verify the following tree invariants:
 // . _root has no parent
 // . parent and child point to each other
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -42,9 +42,6 @@
   friend class AscendTreeCensusClosure;
   friend class DescendTreeCensusClosure;
   friend class DescendTreeSearchClosure;
-  TreeList* _parent;
-  TreeList* _left;
-  TreeList* _right;
 
  protected:
   TreeList* parent() const { return _parent; }
@@ -82,6 +79,11 @@
   // to a TreeChunk.
   TreeChunk* first_available();
 
+  // Returns the block with the largest heap address amongst
+  // those in the list for this size; potentially slow and expensive,
+  // use with caution!
+  TreeChunk* largest_address();
+
   // removeChunkReplaceIfNeeded() removes the given "tc" from the TreeList.
   // If "tc" is the first chunk in the list, it is also the
   // TreeList that is the node in the tree.  removeChunkReplaceIfNeeded()
@@ -254,8 +256,9 @@
   // Methods called at the beginning of a sweep to prepare the
   // statistics for the sweep.
   void       beginSweepDictCensus(double coalSurplusPercent,
-                                  float sweep_current,
-                                  float sweep_estimate);
+                                  float inter_sweep_current,
+                                  float inter_sweep_estimate,
+                                  float intra_sweep_estimate);
   // Methods called after the end of a sweep to modify the
   // statistics for the sweep.
   void       endSweepDictCensus(double splitSurplusPercent);
@@ -269,6 +272,7 @@
   // Print the statistics for all the lists in the tree.  Also may
   // print out summaries.
   void       printDictCensus(void) const;
+  void       print_free_lists(outputStream* st) const;
 
   // For debugging.  Returns the sum of the _returnedBytes for
   // all lists in the tree.
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsLockVerifier.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsLockVerifier.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -32,7 +32,9 @@
 // threads. The second argument is in support of an extra locking
 // check for CFL spaces' free list locks.
 #ifndef PRODUCT
-void CMSLockVerifier::assert_locked(const Mutex* lock, const Mutex* p_lock) {
+void CMSLockVerifier::assert_locked(const Mutex* lock,
+                                    const Mutex* p_lock1,
+                                    const Mutex* p_lock2) {
   if (!Universe::is_fully_initialized()) {
     return;
   }
@@ -40,7 +42,7 @@
   Thread* myThread = Thread::current();
 
   if (lock == NULL) { // a "lock-free" structure, e.g. MUT, protected by CMS token
-    assert(p_lock == NULL, "Unexpected state");
+    assert(p_lock1 == NULL && p_lock2 == NULL, "Unexpected caller error");
     if (myThread->is_ConcurrentGC_thread()) {
       // This test might have to change in the future, if there can be
       // multiple peer CMS threads.  But for now, if we're testing the CMS
@@ -60,36 +62,39 @@
     return;
   }
 
-  if (ParallelGCThreads == 0) {
+  if (myThread->is_VM_thread()
+      || myThread->is_ConcurrentGC_thread()
+      || myThread->is_Java_thread()) {
+    // Make sure that we are holding the associated lock.
     assert_lock_strong(lock);
+    // The checking of p_lock is a special case for CFLS' free-list
+    // locks: we make sure that none of the parallel GC work gang
+    // threads are holding "sub-locks" of freeListLock(). We check only
+    // the parDictionaryAllocLock because the others are too numerous.
+    // This special-case code is somewhat ugly and any improvements
+    // are welcome.
+    assert(p_lock1 == NULL || !p_lock1->is_locked() || p_lock1->owned_by_self(),
+           "Possible race between this and parallel GC threads");
+    assert(p_lock2 == NULL || !p_lock2->is_locked() || p_lock2->owned_by_self(),
+           "Possible race between this and parallel GC threads");
+  } else if (myThread->is_GC_task_thread()) {
+    // Make sure that the VM or CMS thread holds lock on our behalf
+    // XXX If there were a concept of a gang_master for a (set of)
+    // gang_workers, we could have used the identity of that thread
+    // for checking ownership here; for now we just disjunct.
+    assert(lock->owner() == VMThread::vm_thread() ||
+           lock->owner() == ConcurrentMarkSweepThread::cmst(),
+           "Should be locked by VM thread or CMS thread on my behalf");
+    if (p_lock1 != NULL) {
+      assert_lock_strong(p_lock1);
+    }
+    if (p_lock2 != NULL) {
+      assert_lock_strong(p_lock2);
+    }
   } else {
-    if (myThread->is_VM_thread()
-        || myThread->is_ConcurrentGC_thread()
-        || myThread->is_Java_thread()) {
-      // Make sure that we are holding the associated lock.
-      assert_lock_strong(lock);
-      // The checking of p_lock is a spl case for CFLS' free list
-      // locks: we make sure that none of the parallel GC work gang
-      // threads are holding "sub-locks" of freeListLock(). We check only
-      // the parDictionaryAllocLock because the others are too numerous.
-      // This spl case code is somewhat ugly and any improvements
-      // are welcome XXX FIX ME!!
-      if (p_lock != NULL) {
-        assert(!p_lock->is_locked() || p_lock->owned_by_self(),
-               "Possible race between this and parallel GC threads");
-      }
-    } else if (myThread->is_GC_task_thread()) {
-      // Make sure that the VM or CMS thread holds lock on our behalf
-      // XXX If there were a concept of a gang_master for a (set of)
-      // gang_workers, we could have used the identity of that thread
-      // for checking ownership here; for now we just disjunct.
-      assert(lock->owner() == VMThread::vm_thread() ||
-             lock->owner() == ConcurrentMarkSweepThread::cmst(),
-             "Should be locked by VM thread or CMS thread on my behalf");
-    } else {
-      // Make sure we didn't miss some obscure corner case
-      ShouldNotReachHere();
-    }
+    // Make sure we didn't miss some other thread type calling into here;
+    // perhaps as a result of future VM evolution.
+    ShouldNotReachHere();
   }
 }
 #endif
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -29,8 +29,11 @@
 // the parallel threads.
 class CMSLockVerifier: AllStatic {
  public:
-  static void assert_locked(const Mutex* lock, const Mutex* p_lock)
+  static void assert_locked(const Mutex* lock, const Mutex* p_lock1, const Mutex* p_lock2)
     PRODUCT_RETURN;
+  static void assert_locked(const Mutex* lock, const Mutex* p_lock) {
+    assert_locked(lock, p_lock, NULL);
+  }
   static void assert_locked(const Mutex* lock) {
     assert_locked(lock, NULL);
   }
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -62,18 +62,15 @@
   // implementation, namely, the simple binary tree (splaying
   // temporarily disabled).
   switch (dictionaryChoice) {
-    case FreeBlockDictionary::dictionaryBinaryTree:
-      _dictionary = new BinaryTreeDictionary(mr);
-      break;
     case FreeBlockDictionary::dictionarySplayTree:
     case FreeBlockDictionary::dictionarySkipList:
     default:
       warning("dictionaryChoice: selected option not understood; using"
               " default BinaryTreeDictionary implementation instead.");
+    case FreeBlockDictionary::dictionaryBinaryTree:
       _dictionary = new BinaryTreeDictionary(mr);
       break;
   }
-  splitBirth(mr.word_size());
   assert(_dictionary != NULL, "CMS dictionary initialization");
   // The indexed free lists are initially all empty and are lazily
   // filled in on demand. Initialize the array elements to NULL.
@@ -388,6 +385,105 @@
   return res;
 }
 
+void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
+const {
+  reportIndexedFreeListStatistics();
+  st->print_cr("Layout of Indexed Freelists");
+  st->print_cr("---------------------------");
+  FreeList::print_labels_on(st, "size");
+  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
+    _indexedFreeList[i].print_on(st);
+    for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
+         fc = fc->next()) {
+      st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
+                   fc, (HeapWord*)fc + i,
+                   fc->cantCoalesce() ? "\t CC" : "");
+    }
+  }
+}
+
+void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
+const {
+  _promoInfo.print_on(st);
+}
+
+void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
+const {
+  _dictionary->reportStatistics();
+  st->print_cr("Layout of Freelists in Tree");
+  st->print_cr("---------------------------");
+  _dictionary->print_free_lists(st);
+}
+
+class BlkPrintingClosure: public BlkClosure {
+  const CMSCollector*             _collector;
+  const CompactibleFreeListSpace* _sp;
+  const CMSBitMap*                _live_bit_map;
+  const bool                      _post_remark;
+  outputStream*                   _st;
+public:
+  BlkPrintingClosure(const CMSCollector* collector,
+                     const CompactibleFreeListSpace* sp,
+                     const CMSBitMap* live_bit_map,
+                     outputStream* st):
+    _collector(collector),
+    _sp(sp),
+    _live_bit_map(live_bit_map),
+    _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
+    _st(st) { }
+  size_t do_blk(HeapWord* addr);
+};
+
+size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
+  size_t sz = _sp->block_size_no_stall(addr, _collector);
+  assert(sz != 0, "Should always be able to compute a size");
+  if (_sp->block_is_obj(addr)) {
+    const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
+    _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
+      addr,
+      dead ? "dead" : "live",
+      sz,
+      (!dead && CMSPrintObjectsInDump) ? ":" : ".");
+    if (CMSPrintObjectsInDump && !dead) {
+      oop(addr)->print_on(_st);
+      _st->print_cr("--------------------------------------");
+    }
+  } else { // free block
+    _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
+      addr, sz, CMSPrintChunksInDump ? ":" : ".");
+    if (CMSPrintChunksInDump) {
+      ((FreeChunk*)addr)->print_on(_st);
+      _st->print_cr("--------------------------------------");
+    }
+  }
+  return sz;
+}
+
+void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
+  outputStream* st) {
+  st->print_cr("\n=========================");
+  st->print_cr("Block layout in CMS Heap:");
+  st->print_cr("=========================");
+  BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
+  blk_iterate(&bpcl);
+
+  st->print_cr("\n=======================================");
+  st->print_cr("Order & Layout of Promotion Info Blocks");
+  st->print_cr("=======================================");
+  print_promo_info_blocks(st);
+
+  st->print_cr("\n===========================");
+  st->print_cr("Order of Indexed Free Lists");
+  st->print_cr("=========================");
+  print_indexed_free_lists(st);
+
+  st->print_cr("\n=================================");
+  st->print_cr("Order of Free Lists in Dictionary");
+  st->print_cr("=================================");
+  print_dictionary_free_lists(st);
+}
+
+
 void CompactibleFreeListSpace::reportFreeListStatistics() const {
   assert_lock_strong(&_freelistLock);
   assert(PrintFLSStatistics != 0, "Reporting error");
@@ -449,37 +545,37 @@
   if (prevEnd != NULL) {
     // Resize the underlying block offset table.
     _bt.resize(pointer_delta(value, bottom()));
-  if (value <= prevEnd) {
-    assert(value >= unallocated_block(), "New end is below unallocated block");
-  } else {
-    // Now, take this new chunk and add it to the free blocks.
-    // Note that the BOT has not yet been updated for this block.
-    size_t newFcSize = pointer_delta(value, prevEnd);
-    // XXX This is REALLY UGLY and should be fixed up. XXX
-    if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
-      // Mark the boundary of the new block in BOT
-      _bt.mark_block(prevEnd, value);
-      // put it all in the linAB
-      if (ParallelGCThreads == 0) {
-        _smallLinearAllocBlock._ptr = prevEnd;
-        _smallLinearAllocBlock._word_size = newFcSize;
-        repairLinearAllocBlock(&_smallLinearAllocBlock);
-      } else { // ParallelGCThreads > 0
-        MutexLockerEx x(parDictionaryAllocLock(),
-                        Mutex::_no_safepoint_check_flag);
-        _smallLinearAllocBlock._ptr = prevEnd;
-        _smallLinearAllocBlock._word_size = newFcSize;
-        repairLinearAllocBlock(&_smallLinearAllocBlock);
+    if (value <= prevEnd) {
+      assert(value >= unallocated_block(), "New end is below unallocated block");
+    } else {
+      // Now, take this new chunk and add it to the free blocks.
+      // Note that the BOT has not yet been updated for this block.
+      size_t newFcSize = pointer_delta(value, prevEnd);
+      // XXX This is REALLY UGLY and should be fixed up. XXX
+      if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
+        // Mark the boundary of the new block in BOT
+        _bt.mark_block(prevEnd, value);
+        // put it all in the linAB
+        if (ParallelGCThreads == 0) {
+          _smallLinearAllocBlock._ptr = prevEnd;
+          _smallLinearAllocBlock._word_size = newFcSize;
+          repairLinearAllocBlock(&_smallLinearAllocBlock);
+        } else { // ParallelGCThreads > 0
+          MutexLockerEx x(parDictionaryAllocLock(),
+                          Mutex::_no_safepoint_check_flag);
+          _smallLinearAllocBlock._ptr = prevEnd;
+          _smallLinearAllocBlock._word_size = newFcSize;
+          repairLinearAllocBlock(&_smallLinearAllocBlock);
+        }
+        // Births of chunks put into a LinAB are not recorded.  Births
+        // of chunks as they are allocated out of a LinAB are.
+      } else {
+        // Add the block to the free lists, if possible coalescing it
+        // with the last free block, and update the BOT and census data.
+        addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
       }
-      // Births of chunks put into a LinAB are not recorded.  Births
-      // of chunks as they are allocated out of a LinAB are.
-    } else {
-      // Add the block to the free lists, if possible coalescing it
-      // with the last free block, and update the BOT and census data.
-      addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
     }
   }
-  }
 }
 
 class FreeListSpace_DCTOC : public Filtering_DCTOC {
@@ -732,7 +828,7 @@
 
 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
                                                   UpwardsObjectClosure* cl) {
-  assert_locked();
+  assert_locked(freelistLock());
   NOT_PRODUCT(verify_objects_initialized());
   Space::object_iterate_mem(mr, cl);
 }
@@ -1212,12 +1308,15 @@
 void CompactibleFreeListSpace::assert_locked() const {
   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
 }
+
+void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
+  CMSLockVerifier::assert_locked(lock);
+}
 #endif
 
 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
   // In the parallel case, the main thread holds the free list lock
   // on behalf the parallel threads.
-  assert_locked();
   FreeChunk* fc;
   {
     // If GC is parallel, this might be called by several threads.
@@ -1298,17 +1397,18 @@
     res = blk->_ptr;
     _bt.allocated(res, blk->_word_size);
   } else if (size + MinChunkSize <= blk->_refillSize) {
+    size_t sz = blk->_word_size;
     // Update _unallocated_block if the size is such that chunk would be
     // returned to the indexed free list.  All other chunks in the indexed
     // free lists are allocated from the dictionary so that _unallocated_block
     // has already been adjusted for them.  Do it here so that the cost
     // is incurred once for all chunks added back to the indexed free lists.
-    if (blk->_word_size < SmallForDictionary) {
-      _bt.allocated(blk->_ptr, blk->_word_size);
+    if (sz < SmallForDictionary) {
+      _bt.allocated(blk->_ptr, sz);
     }
     // Return the chunk that isn't big enough, and then refill below.
-    addChunkToFreeLists(blk->_ptr, blk->_word_size);
-    _bt.verify_single_block(blk->_ptr, (blk->_ptr + blk->_word_size));
+    addChunkToFreeLists(blk->_ptr, sz);
+    splitBirth(sz);
     // Don't keep statistics on adding back chunk from a LinAB.
   } else {
     // A refilled block would not satisfy the request.
@@ -1376,11 +1476,13 @@
     res = getChunkFromIndexedFreeListHelper(size);
   }
   _bt.verify_not_unallocated((HeapWord*) res, size);
+  assert(res == NULL || res->size() == size, "Incorrect block size");
   return res;
 }
 
 FreeChunk*
-CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size) {
+CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
+  bool replenish) {
   assert_locked();
   FreeChunk* fc = NULL;
   if (size < SmallForDictionary) {
@@ -1398,54 +1500,66 @@
       // and replenishing indexed lists from the small linAB.
       //
       FreeChunk* newFc = NULL;
-      size_t replenish_size = CMSIndexedFreeListReplenish * size;
+      const size_t replenish_size = CMSIndexedFreeListReplenish * size;
       if (replenish_size < SmallForDictionary) {
         // Do not replenish from an underpopulated size.
         if (_indexedFreeList[replenish_size].surplus() > 0 &&
             _indexedFreeList[replenish_size].head() != NULL) {
-          newFc =
-            _indexedFreeList[replenish_size].getChunkAtHead();
-        } else {
+          newFc = _indexedFreeList[replenish_size].getChunkAtHead();
+        } else if (bestFitFirst()) {
           newFc = bestFitSmall(replenish_size);
         }
       }
-      if (newFc != NULL) {
-        splitDeath(replenish_size);
-      } else if (replenish_size > size) {
+      if (newFc == NULL && replenish_size > size) {
         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
-        newFc =
-          getChunkFromIndexedFreeListHelper(replenish_size);
+        newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
       }
+      // Note: The split-death stats update for the block obtained above
+      // is recorded below, precisely when we know we are actually going
+      // to split it into more than one piece.
       if (newFc != NULL) {
-        assert(newFc->size() == replenish_size, "Got wrong size");
-        size_t i;
-        FreeChunk *curFc, *nextFc;
-        // carve up and link blocks 0, ..., CMSIndexedFreeListReplenish - 2
-        // The last chunk is not added to the lists but is returned as the
-        // free chunk.
-        for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
-             i = 0;
-             i < (CMSIndexedFreeListReplenish - 1);
-             curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
-             i++) {
+        if  (replenish || CMSReplenishIntermediate) {
+          // Replenish this list and return one block to caller.
+          size_t i;
+          FreeChunk *curFc, *nextFc;
+          size_t num_blk = newFc->size() / size;
+          assert(num_blk >= 1, "Smaller than requested?");
+          assert(newFc->size() % size == 0, "Should be integral multiple of request");
+          if (num_blk > 1) {
+            // we are sure we will be splitting the block just obtained
+            // into multiple pieces; record the split-death of the original
+            splitDeath(replenish_size);
+          }
+          // carve up and link blocks 0, ..., num_blk - 2
+          // The last chunk is not added to the lists but is returned as the
+          // free chunk.
+          for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
+               i = 0;
+               i < (num_blk - 1);
+               curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
+               i++) {
+            curFc->setSize(size);
+            // Don't record this as a return in order to try and
+            // determine the "returns" from a GC.
+            _bt.verify_not_unallocated((HeapWord*) curFc, size);
+            _indexedFreeList[size].returnChunkAtTail(curFc, false);
+            _bt.mark_block((HeapWord*)curFc, size);
+            splitBirth(size);
+            // Don't record the initial population of the indexed list
+            // as a split birth.
+          }
+
+          // check that the arithmetic was OK above
+          assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
+            "inconsistency in carving newFc");
           curFc->setSize(size);
-          // Don't record this as a return in order to try and
-          // determine the "returns" from a GC.
-          _bt.verify_not_unallocated((HeapWord*) fc, size);
-          _indexedFreeList[size].returnChunkAtTail(curFc, false);
           _bt.mark_block((HeapWord*)curFc, size);
           splitBirth(size);
-          // Don't record the initial population of the indexed list
-          // as a split birth.
+          fc = curFc;
+        } else {
+          // Return entire block to caller
+          fc = newFc;
         }
-
-        // check that the arithmetic was OK above
-        assert((HeapWord*)nextFc == (HeapWord*)newFc + replenish_size,
-          "inconsistency in carving newFc");
-        curFc->setSize(size);
-        _bt.mark_block((HeapWord*)curFc, size);
-        splitBirth(size);
-        return curFc;
       }
     }
   } else {
@@ -1453,7 +1567,7 @@
     // replenish the indexed free list.
     fc = getChunkFromDictionaryExact(size);
   }
-  assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
+  // assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
   return fc;
 }
 
@@ -1512,6 +1626,11 @@
   // adjust _unallocated_block downward, as necessary
   _bt.freed((HeapWord*)chunk, size);
   _dictionary->returnChunk(chunk);
+#ifndef PRODUCT
+  if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
+    TreeChunk::as_TreeChunk(chunk)->list()->verify_stats();
+  }
+#endif // PRODUCT
 }
 
 void
@@ -1525,6 +1644,11 @@
   } else {
     _indexedFreeList[size].returnChunkAtHead(fc);
   }
+#ifndef PRODUCT
+  if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
+     _indexedFreeList[size].verify_stats();
+  }
+#endif // PRODUCT
 }
 
 // Add chunk to end of last block -- if it's the largest
@@ -1537,7 +1661,6 @@
   HeapWord* chunk, size_t     size) {
   // check that the chunk does lie in this space!
   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
-  assert_locked();
   // One of the parallel gc task threads may be here
   // whilst others are allocating.
   Mutex* lock = NULL;
@@ -1991,24 +2114,26 @@
   return frag;
 }
 
-#define CoalSurplusPercent 1.05
-#define SplitSurplusPercent 1.10
-
 void CompactibleFreeListSpace::beginSweepFLCensus(
   float inter_sweep_current,
-  float inter_sweep_estimate) {
+  float inter_sweep_estimate,
+  float intra_sweep_estimate) {
   assert_locked();
   size_t i;
   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
     FreeList* fl    = &_indexedFreeList[i];
-    fl->compute_desired(inter_sweep_current, inter_sweep_estimate);
-    fl->set_coalDesired((ssize_t)((double)fl->desired() * CoalSurplusPercent));
+    if (PrintFLSStatistics > 1) {
+      gclog_or_tty->print("size[%d] : ", i);
+    }
+    fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
+    fl->set_coalDesired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
     fl->set_beforeSweep(fl->count());
     fl->set_bfrSurp(fl->surplus());
   }
-  _dictionary->beginSweepDictCensus(CoalSurplusPercent,
+  _dictionary->beginSweepDictCensus(CMSLargeCoalSurplusPercent,
                                     inter_sweep_current,
-                                    inter_sweep_estimate);
+                                    inter_sweep_estimate,
+                                    intra_sweep_estimate);
 }
 
 void CompactibleFreeListSpace::setFLSurplus() {
@@ -2017,7 +2142,7 @@
   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
     FreeList *fl = &_indexedFreeList[i];
     fl->set_surplus(fl->count() -
-                    (ssize_t)((double)fl->desired() * SplitSurplusPercent));
+                    (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
   }
 }
 
@@ -2048,6 +2173,11 @@
 }
 
 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
+  if (PrintFLSStatistics > 0) {
+    HeapWord* largestAddr = (HeapWord*) dictionary()->findLargestDict();
+    gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
+                           largestAddr);
+  }
   setFLSurplus();
   setFLHints();
   if (PrintGC && PrintFLSCensus > 0) {
@@ -2055,7 +2185,7 @@
   }
   clearFLCensus();
   assert_locked();
-  _dictionary->endSweepDictCensus(SplitSurplusPercent);
+  _dictionary->endSweepDictCensus(CMSLargeSplitSurplusPercent);
 }
 
 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
@@ -2312,13 +2442,18 @@
 }
 
 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
-  FreeChunk* fc =  _indexedFreeList[size].head();
+  FreeChunk* fc   =  _indexedFreeList[size].head();
+  FreeChunk* tail =  _indexedFreeList[size].tail();
+  size_t    num = _indexedFreeList[size].count();
+  size_t      n = 0;
   guarantee((size % 2 == 0) || fc == NULL, "Odd slots should be empty");
-  for (; fc != NULL; fc = fc->next()) {
+  for (; fc != NULL; fc = fc->next(), n++) {
     guarantee(fc->size() == size, "Size inconsistency");
     guarantee(fc->isFree(), "!free?");
     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
+    guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
   }
+  guarantee(n == num, "Incorrect count");
 }
 
 #ifndef PRODUCT
@@ -2516,11 +2651,41 @@
   _tracking = true;
 }
 
-void PromotionInfo::stopTrackingPromotions() {
+#define CMSPrintPromoBlockInfo 1
+
+void PromotionInfo::stopTrackingPromotions(uint worker_id) {
   assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
          "spooling inconsistency?");
   _firstIndex = _nextIndex = 1;
   _tracking = false;
+  if (CMSPrintPromoBlockInfo > 1) {
+    print_statistics(worker_id);
+  }
+}
+
+void PromotionInfo::print_statistics(uint worker_id) const {
+  assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
+         "Else will undercount");
+  assert(CMSPrintPromoBlockInfo > 0, "Else unnecessary call");
+  // Count the number of blocks and slots in the free pool
+  size_t slots  = 0;
+  size_t blocks = 0;
+  for (SpoolBlock* cur_spool = _spareSpool;
+       cur_spool != NULL;
+       cur_spool = cur_spool->nextSpoolBlock) {
+    // the first entry is just a self-pointer; indices 1 through
+    // bufferSize - 1 are occupied (thus, bufferSize - 1 slots).
+    guarantee((void*)cur_spool->displacedHdr == (void*)&cur_spool->displacedHdr,
+              "first entry of displacedHdr should be self-referential");
+    slots += cur_spool->bufferSize - 1;
+    blocks++;
+  }
+  if (_spoolHead != NULL) {
+    slots += _spoolHead->bufferSize - 1;
+    blocks++;
+  }
+  gclog_or_tty->print_cr(" [worker %d] promo_blocks = %d, promo_slots = %d ",
+                         worker_id, blocks, slots);
 }
 
 // When _spoolTail is not NULL, then the slot <_spoolTail, _nextIndex>
@@ -2584,15 +2749,84 @@
   guarantee(numDisplacedHdrs == numObjsWithDisplacedHdrs, "Displaced hdr count");
 }
 
+void PromotionInfo::print_on(outputStream* st) const {
+  SpoolBlock* curSpool = NULL;
+  size_t i = 0;
+  st->print_cr("start & end indices: [" SIZE_FORMAT ", " SIZE_FORMAT ")",
+               _firstIndex, _nextIndex);
+  for (curSpool = _spoolHead; curSpool != _spoolTail && curSpool != NULL;
+       curSpool = curSpool->nextSpoolBlock) {
+    curSpool->print_on(st);
+    st->print_cr(" active ");
+    i++;
+  }
+  for (curSpool = _spoolTail; curSpool != NULL;
+       curSpool = curSpool->nextSpoolBlock) {
+    curSpool->print_on(st);
+    st->print_cr(" inactive ");
+    i++;
+  }
+  for (curSpool = _spareSpool; curSpool != NULL;
+       curSpool = curSpool->nextSpoolBlock) {
+    curSpool->print_on(st);
+    st->print_cr(" free ");
+    i++;
+  }
+  st->print_cr(SIZE_FORMAT " header spooling blocks", i);
+}
+
+void SpoolBlock::print_on(outputStream* st) const {
+  st->print("[" PTR_FORMAT "," PTR_FORMAT "), " SIZE_FORMAT " HeapWords -> " PTR_FORMAT,
+            this, (HeapWord*)displacedHdr + bufferSize,
+            bufferSize, nextSpoolBlock);
+}
+
+///////////////////////////////////////////////////////////////////////////
+// CFLS_LAB
+///////////////////////////////////////////////////////////////////////////
+
+#define VECTOR_257(x)                                                                                  \
+  /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
+  {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
+     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
+     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
+     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
+     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
+     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
+     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
+     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
+     x }
+
+// Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
+// OldPLABSize, whose static default is different; if overridden at the
+// command-line, this will get reinitialized via a call to
+// modify_initialization() below.
+AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
+  VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
+size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
+int    CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
 
 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
   _cfls(cfls)
 {
-  _blocks_to_claim = CMSParPromoteBlocksToClaim;
+  assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
        i < CompactibleFreeListSpace::IndexSetSize;
        i += CompactibleFreeListSpace::IndexSetStride) {
     _indexedFreeList[i].set_size(i);
+    _num_blocks[i] = 0;
+  }
+}
+
+static bool _CFLS_LAB_modified = false;
+
+void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
+  assert(!_CFLS_LAB_modified, "Call only once");
+  _CFLS_LAB_modified = true;
+  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
+       i < CompactibleFreeListSpace::IndexSetSize;
+       i += CompactibleFreeListSpace::IndexSetStride) {
+    _blocks_to_claim[i].modify(n, wt, true /* force */);
   }
 }
 
@@ -2607,11 +2841,9 @@
     if (res == NULL) return NULL;
   } else {
     FreeList* fl = &_indexedFreeList[word_sz];
-    bool filled = false; //TRAP
     if (fl->count() == 0) {
-      bool filled = true; //TRAP
       // Attempt to refill this local free list.
-      _cfls->par_get_chunk_of_blocks(word_sz, _blocks_to_claim, fl);
+      get_from_global_pool(word_sz, fl);
       // If it didn't work, give up.
       if (fl->count() == 0) return NULL;
     }
@@ -2626,80 +2858,190 @@
   return (HeapWord*)res;
 }
 
-void CFLS_LAB::retire() {
-  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
+// Get a chunk of blocks of the right size and update related
+// book-keeping stats
+void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList* fl) {
+  // Get the #blocks we want to claim
+  size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
+  assert(n_blks > 0, "Error");
+  assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
+  // In some cases, when the application has a phase change,
+  // there may be a sudden and sharp shift in the object survival
+  // profile, and updating the counts at the end of a scavenge
+  // may not be quick enough, giving rise to large scavenge pauses
+  // during these phase changes. It is beneficial to detect such
+  // changes on-the-fly during a scavenge and avoid such a phase-change
+  // pothole. The following code is a heuristic attempt to do that.
+  // It is protected by a product flag until we have gained
+  // enough experience with this heuristic and fine-tuned its behaviour.
+  // WARNING: This might increase fragmentation if we overreact to
+  // small spikes, so some kind of historical smoothing based on
+  // previous experience with the greater reactivity might be useful.
+  // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
+  // default.
+  if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
+    size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
+    n_blks += CMSOldPLABReactivityFactor*multiple*n_blks;
+    n_blks = MIN2(n_blks, CMSOldPLABMax);
+  }
+  assert(n_blks > 0, "Error");
+  _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
+  // Update stats table entry for this block size
+  _num_blocks[word_sz] += fl->count();
+}
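// Illustrative sketch (not part of this changeset): it restates the
// CMSOldPLABResizeQuicker arithmetic above with plain parameters; the
// parameter comments mirror the product flags, but this is a
// stand-alone model, not VM code.
#include <algorithm>
#include <cstddef>

static size_t quicker_resize_sketch(size_t n_blks,          // smoothed blocks-to-claim
                                    size_t used_this_cycle, // _num_blocks[word_sz] so far
                                    size_t tolerance,       // CMSOldPLABToleranceFactor
                                    size_t num_refills,     // CMSOldPLABNumRefills
                                    size_t reactivity,      // CMSOldPLABReactivityFactor
                                    size_t plab_max) {      // CMSOldPLABMax
  // How many "expected scavenge demands" current usage amounts to;
  // zero while demand stays within tolerance of the smoothed average.
  size_t multiple = used_this_cycle / (tolerance * num_refills * n_blks);
  n_blks += reactivity * multiple * n_blks;
  return std::min(n_blks, plab_max);
}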
+
+void CFLS_LAB::compute_desired_plab_size() {
+  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
        i < CompactibleFreeListSpace::IndexSetSize;
        i += CompactibleFreeListSpace::IndexSetStride) {
-    if (_indexedFreeList[i].count() > 0) {
-      MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
-                      Mutex::_no_safepoint_check_flag);
-      _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
-      // Reset this list.
-      _indexedFreeList[i] = FreeList();
-      _indexedFreeList[i].set_size(i);
+    assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
+           "Counter inconsistency");
+    if (_global_num_workers[i] > 0) {
+      // Need to smooth wrt historical average
+      if (ResizeOldPLAB) {
+        _blocks_to_claim[i].sample(
+          MAX2((size_t)CMSOldPLABMin,
+          MIN2((size_t)CMSOldPLABMax,
+               _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
+      }
+      // Reset counters for next round
+      _global_num_workers[i] = 0;
+      _global_num_blocks[i] = 0;
+      if (PrintOldPLAB) {
+        gclog_or_tty->print_cr("[%d]: %d", i, (size_t)_blocks_to_claim[i].average());
+      }
     }
   }
 }
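// Illustrative sketch (not part of this changeset): the exponential
// smoothing that sample() applies above, assuming the usual
// decaying-average form in which 'weight' (a percent, playing the role
// of OldPLABWeight) is the share given to the new observation.
struct WeightedAverageSketch {
  float    avg;
  unsigned weight;  // percent given to the new sample, 0..100
  void sample(float observed) {
    avg = ((100.0f - weight) * avg + weight * observed) / 100.0f;
  }
};
// Per size i, the observation fed in above is
//   clamp(global_num_blocks[i] / (global_num_workers[i] * CMSOldPLABNumRefills),
//         CMSOldPLABMin, CMSOldPLABMax).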
 
-void
-CompactibleFreeListSpace::
-par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
+void CFLS_LAB::retire(int tid) {
+  // We run this single threaded with the world stopped;
+  // so no need for locks and such.
+#define CFLS_LAB_PARALLEL_ACCESS 0
+  NOT_PRODUCT(Thread* t = Thread::current();)
+  assert(Thread::current()->is_VM_thread(), "Error");
+  assert(CompactibleFreeListSpace::IndexSetStart == CompactibleFreeListSpace::IndexSetStride,
+         "Will access to uninitialized slot below");
+#if CFLS_LAB_PARALLEL_ACCESS
+  for (size_t i = CompactibleFreeListSpace::IndexSetSize - 1;
+       i > 0;
+       i -= CompactibleFreeListSpace::IndexSetStride) {
+#else // CFLS_LAB_PARALLEL_ACCESS
+  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
+       i < CompactibleFreeListSpace::IndexSetSize;
+       i += CompactibleFreeListSpace::IndexSetStride) {
+#endif // !CFLS_LAB_PARALLEL_ACCESS
+    assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
+           "Can't retire more than what we obtained");
+    if (_num_blocks[i] > 0) {
+      size_t num_retire = _indexedFreeList[i].count();
+      assert(_num_blocks[i] > num_retire, "Should have used at least one");
+      {
+#if CFLS_LAB_PARALLEL_ACCESS
+        MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
+                        Mutex::_no_safepoint_check_flag);
+#endif // CFLS_LAB_PARALLEL_ACCESS
+        // Update globals stats for num_blocks used
+        _global_num_blocks[i] += (_num_blocks[i] - num_retire);
+        _global_num_workers[i]++;
+        assert(_global_num_workers[i] <= (ssize_t)ParallelGCThreads, "Too big");
+        if (num_retire > 0) {
+          _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
+          // Reset this list.
+          _indexedFreeList[i] = FreeList();
+          _indexedFreeList[i].set_size(i);
+        }
+      }
+      if (PrintOldPLAB) {
+        gclog_or_tty->print_cr("%d[%d]: %d/%d/%d",
+                               tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
+      }
+      // Reset stats for next round
+      _num_blocks[i]         = 0;
+    }
+  }
+}
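// Illustrative sketch (not part of this changeset): the bookkeeping in
// retire() condensed. Per size, the blocks a worker actually consumed
// are those it obtained minus those it now returns; these feed the
// global tallies that compute_desired_plab_size() averages over workers.
#include <cstddef>

static void fold_worker_stats_sketch(size_t obtained,   // _num_blocks[i]
                                     size_t returned,   // num_retire
                                     size_t* global_blocks,
                                     int*   global_workers) {
  *global_blocks  += obtained - returned;  // blocks really used this round
  *global_workers += 1;                    // one more contributing worker
}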
+
+void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
   assert(fl->count() == 0, "Precondition.");
   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
          "Precondition");
 
-  // We'll try all multiples of word_sz in the indexed set (starting with
-  // word_sz itself), then try getting a big chunk and splitting it.
-  int k = 1;
-  size_t cur_sz = k * word_sz;
-  bool found = false;
-  while (cur_sz < CompactibleFreeListSpace::IndexSetSize && k == 1) {
-    FreeList* gfl = &_indexedFreeList[cur_sz];
-    FreeList fl_for_cur_sz;  // Empty.
-    fl_for_cur_sz.set_size(cur_sz);
-    {
-      MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
-                      Mutex::_no_safepoint_check_flag);
-      if (gfl->count() != 0) {
-        size_t nn = MAX2(n/k, (size_t)1);
-        gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
-        found = true;
+  // We'll try all multiples of word_sz in the indexed set, starting with
+  // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
+  // then try getting a big chunk and splitting it.
+  {
+    bool found;
+    int  k;
+    size_t cur_sz;
+    for (k = 1, cur_sz = k * word_sz, found = false;
+         (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
+         (CMSSplitIndexedFreeListBlocks || k <= 1);
+         k++, cur_sz = k * word_sz) {
+      FreeList* gfl = &_indexedFreeList[cur_sz];
+      FreeList fl_for_cur_sz;  // Empty.
+      fl_for_cur_sz.set_size(cur_sz);
+      {
+        MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
+                        Mutex::_no_safepoint_check_flag);
+        if (gfl->count() != 0) {
+          // nn is the number of chunks of size cur_sz that
+          // we'd need to split k-ways each, in order to create
+          // "n" chunks of size word_sz each.
+          const size_t nn = MAX2(n/k, (size_t)1);
+          gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
+          found = true;
+          if (k > 1) {
+            // Update split death stats for the cur_sz-size blocks list:
+            // we increment the split death count by the number of blocks
+            // we just took from the cur_sz-size blocks list and which
+            // we will be splitting below.
+            ssize_t deaths = _indexedFreeList[cur_sz].splitDeaths() +
+                             fl_for_cur_sz.count();
+            _indexedFreeList[cur_sz].set_splitDeaths(deaths);
+          }
+        }
+      }
+      // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
+      if (found) {
+        if (k == 1) {
+          fl->prepend(&fl_for_cur_sz);
+        } else {
+          // Divide each block on fl_for_cur_sz up k ways.
+          FreeChunk* fc;
+          while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
+            // Must do this in reverse order, so that anybody attempting to
+            // access the main chunk sees it as a single free block until we
+            // change it.
+            size_t fc_size = fc->size();
+            for (int i = k-1; i >= 0; i--) {
+              FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
+              ffc->setSize(word_sz);
+              ffc->linkNext(NULL);
+              ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
+              // Above must occur before BOT is updated below.
+              // Splitting from the right; before this step, fc_size == (i + 1) * word_sz.
+              _bt.mark_block((HeapWord*)ffc, word_sz);
+              fc_size -= word_sz;
+              _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
+              _bt.verify_single_block((HeapWord*)fc, fc_size);
+              _bt.verify_single_block((HeapWord*)ffc, ffc->size());
+              // Push this on "fl".
+              fl->returnChunkAtHead(ffc);
+            }
+            // Sanity-check the list we have just built.
+            assert(fl->tail()->next() == NULL, "List invariant.");
+          }
+        }
+        // Update birth stats for this block size.
+        size_t num = fl->count();
+        MutexLockerEx x(_indexedFreeListParLocks[word_sz],
+                        Mutex::_no_safepoint_check_flag);
+        ssize_t births = _indexedFreeList[word_sz].splitBirths() + num;
+        _indexedFreeList[word_sz].set_splitBirths(births);
+        return;
       }
     }
-    // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
-    if (found) {
-      if (k == 1) {
-        fl->prepend(&fl_for_cur_sz);
-      } else {
-        // Divide each block on fl_for_cur_sz up k ways.
-        FreeChunk* fc;
-        while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
-          // Must do this in reverse order, so that anybody attempting to
-          // access the main chunk sees it as a single free block until we
-          // change it.
-          size_t fc_size = fc->size();
-          for (int i = k-1; i >= 0; i--) {
-            FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
-            ffc->setSize(word_sz);
-            ffc->linkNext(NULL);
-            ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
-            // Above must occur before BOT is updated below.
-            // splitting from the right, fc_size == (k - i + 1) * wordsize
-            _bt.mark_block((HeapWord*)ffc, word_sz);
-            fc_size -= word_sz;
-            _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
-            _bt.verify_single_block((HeapWord*)fc, fc_size);
-            _bt.verify_single_block((HeapWord*)ffc, ffc->size());
-            // Push this on "fl".
-            fl->returnChunkAtHead(ffc);
-          }
-          // TRAP
-          assert(fl->tail()->next() == NULL, "List invariant.");
-        }
-      }
-      return;
-    }
-    k++; cur_sz = k * word_sz;
   }
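// Illustrative sketch (not part of this changeset): the arithmetic of
// the k-way split above in isolation. From the free list of chunks of
// size k*word_sz we take nn = max(n/k, 1) chunks and split each into k
// blocks of word_sz.
#include <cstddef>

static size_t blocks_from_k_way_split_sketch(size_t n, size_t k) {
  const size_t nn = (n / k > 0) ? (n / k) : 1;  // chunks taken from the cur_sz list
  return nn * k;                                // word_sz blocks produced
}
// e.g. n = 10, k = 4: take 2 chunks, produce 8 blocks (slightly short);
//      n = 3,  k = 4: take 1 chunk,  produce 4 blocks (slightly over).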
   // Otherwise, we'll split a block from the dictionary.
   FreeChunk* fc = NULL;
@@ -2723,17 +3065,31 @@
       }
     }
     if (fc == NULL) return;
+    assert((ssize_t)n >= 1, "Control point invariant");
     // Otherwise, split up that block.
-    size_t nn = fc->size() / word_sz;
+    const size_t nn = fc->size() / word_sz;
     n = MIN2(nn, n);
+    assert((ssize_t)n >= 1, "Control point invariant");
     rem = fc->size() - n * word_sz;
     // If there is a remainder, and it's too small, allocate one fewer.
     if (rem > 0 && rem < MinChunkSize) {
       n--; rem += word_sz;
     }
+    // Note that at this point we may have n == 0.
+    assert((ssize_t)n >= 0, "Control point invariant");
+
+    // If n is 0, the chunk fc that was found is not large
+    // enough to leave a viable remainder.  We are unable to
+    // allocate even one block.  Return fc to the
+    // dictionary and return, leaving "fl" empty.
+    if (n == 0) {
+      returnChunkToDictionary(fc);
+      return;
+    }
+
     // First return the remainder, if any.
     // Note that we hold the lock until we decide if we're going to give
-    // back the remainder to the dictionary, since a contending allocator
+    // back the remainder to the dictionary, since a concurrent allocator
     // may otherwise see the heap as empty.  (We're willing to take that
     // hit if the block is a small block.)
     if (rem > 0) {
@@ -2743,18 +3099,16 @@
       rem_fc->linkNext(NULL);
       rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
       // Above must occur before BOT is updated below.
+      assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
       if (rem >= IndexSetSize) {
         returnChunkToDictionary(rem_fc);
-        dictionary()->dictCensusUpdate(fc->size(),
-                                       true /*split*/,
-                                       true /*birth*/);
+        dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
         rem_fc = NULL;
       }
       // Otherwise, return it to the small list below.
     }
   }
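// Illustrative sketch (not part of this changeset): the remainder rule
// applied above -- carve n blocks of word_sz from a dictionary chunk
// only if what is left over is either nothing or a still-viable chunk
// of at least MinChunkSize.
#include <cstddef>

static size_t choose_split_count_sketch(size_t chunk_size, size_t word_sz,
                                        size_t n_wanted, size_t min_chunk) {
  size_t n = chunk_size / word_sz;
  if (n > n_wanted) n = n_wanted;
  size_t rem = chunk_size - n * word_sz;
  if (rem > 0 && rem < min_chunk) {
    n--;  // give one block back rather than leave an unviable sliver
  }
  return n;  // may be 0: the chunk cannot supply even one block
}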
-  //
   if (rem_fc != NULL) {
     MutexLockerEx x(_indexedFreeListParLocks[rem],
                     Mutex::_no_safepoint_check_flag);
@@ -2762,7 +3116,7 @@
     _indexedFreeList[rem].returnChunkAtHead(rem_fc);
     smallSplitBirth(rem);
   }
-
+  assert((ssize_t)n > 0 && fc != NULL, "Consistency");
   // Now do the splitting up.
   // Must do this in reverse order, so that anybody attempting to
   // access the main chunk sees it as a single free block until we
@@ -2792,13 +3146,15 @@
   _bt.verify_single_block((HeapWord*)fc, fc->size());
   fl->returnChunkAtHead(fc);
 
+  assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
   {
+    // Update the stats for this block size.
     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
                     Mutex::_no_safepoint_check_flag);
-    ssize_t new_births = _indexedFreeList[word_sz].splitBirths() + n;
-    _indexedFreeList[word_sz].set_splitBirths(new_births);
-    ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
-    _indexedFreeList[word_sz].set_surplus(new_surplus);
+    const ssize_t births = _indexedFreeList[word_sz].splitBirths() + n;
+    _indexedFreeList[word_sz].set_splitBirths(births);
+    // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
+    // _indexedFreeList[word_sz].set_surplus(new_surplus);
   }
 
   // TRAP
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -25,8 +25,6 @@
 // Classes in support of keeping track of promotions into a non-Contiguous
 // space, in this case a CompactibleFreeListSpace.
 
-#define CFLS_LAB_REFILL_STATS 0
-
 // Forward declarations
 class CompactibleFreeListSpace;
 class BlkClosure;
@@ -89,6 +87,9 @@
     displacedHdr = (markOop*)&displacedHdr;
     nextSpoolBlock = NULL;
   }
+
+  void print_on(outputStream* st) const;
+  void print() const { print_on(gclog_or_tty); }
 };
 
 class PromotionInfo VALUE_OBJ_CLASS_SPEC {
@@ -121,7 +122,7 @@
     return _promoHead == NULL;
   }
   void startTrackingPromotions();
-  void stopTrackingPromotions();
+  void stopTrackingPromotions(uint worker_id = 0);
   bool tracking() const          { return _tracking;  }
   void track(PromotedObject* trackOop);      // keep track of a promoted oop
   // The following variant must be used when trackOop is not fully
@@ -161,6 +162,9 @@
     _nextIndex = 0;
 
   }
+
+  void print_on(outputStream* st) const;
+  void print_statistics(uint worker_id) const;
 };
 
 class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
@@ -243,6 +247,7 @@
   mutable Mutex _freelistLock;
   // locking verifier convenience function
   void assert_locked() const PRODUCT_RETURN;
+  void assert_locked(const Mutex* lock) const PRODUCT_RETURN;
 
   // Linear allocation blocks
   LinearAllocBlock _smallLinearAllocBlock;
@@ -281,13 +286,6 @@
   // Locks protecting the exact lists during par promotion allocation.
   Mutex* _indexedFreeListParLocks[IndexSetSize];
 
-#if CFLS_LAB_REFILL_STATS
-  // Some statistics.
-  jint  _par_get_chunk_from_small;
-  jint  _par_get_chunk_from_large;
-#endif
-
-
   // Attempt to obtain up to "n" blocks of the size "word_sz" (which is
   // required to be smaller than "IndexSetSize".)  If successful,
   // adds them to "fl", which is required to be an empty free list.
@@ -320,7 +318,7 @@
   // Helper function for getChunkFromIndexedFreeList.
   // Replenish the indexed free list for this "size".  Do not take from an
   // underpopulated size.
-  FreeChunk*  getChunkFromIndexedFreeListHelper(size_t size);
+  FreeChunk*  getChunkFromIndexedFreeListHelper(size_t size, bool replenish = true);
 
   // Get a chunk from the indexed free list.  If the indexed free list
   // does not have a free chunk, try to replenish the indexed free list
@@ -430,10 +428,6 @@
   void initialize_sequential_subtasks_for_marking(int n_threads,
          HeapWord* low = NULL);
 
-#if CFLS_LAB_REFILL_STATS
-  void print_par_alloc_stats();
-#endif
-
   // Space enquiries
   size_t used() const;
   size_t free() const;
@@ -617,6 +611,12 @@
   // Do some basic checks on the free lists.
   void checkFreeListConsistency()         const PRODUCT_RETURN;
 
+  // Printing support
+  void dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st);
+  void print_indexed_free_lists(outputStream* st) const;
+  void print_dictionary_free_lists(outputStream* st) const;
+  void print_promo_info_blocks(outputStream* st) const;
+
   NOT_PRODUCT (
     void initializeIndexedFreeListArrayReturnedBytes();
     size_t sumIndexedFreeListArrayReturnedBytes();
@@ -638,8 +638,9 @@
 
   // Statistics functions
   // Initialize census for lists before the sweep.
-  void beginSweepFLCensus(float sweep_current,
-                          float sweep_estimate);
+  void beginSweepFLCensus(float inter_sweep_current,
+                          float inter_sweep_estimate,
+                          float intra_sweep_estimate);
   // Set the surplus for each of the free lists.
   void setFLSurplus();
   // Set the hint for each of the free lists.
@@ -730,16 +731,17 @@
   FreeList _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];
 
-  // Initialized from a command-line arg.
-  size_t _blocks_to_claim;
 
-#if CFLS_LAB_REFILL_STATS
-  // Some statistics.
-  int _refills;
-  int _blocksTaken;
-  static int _tot_refills;
-  static int _tot_blocksTaken;
-  static int _next_threshold;
-#endif
+  // Allocation statistics in support of dynamic adjustment of
+  // #blocks to claim per get_from_global_pool() call below.
+  static AdaptiveWeightedAverage
+                 _blocks_to_claim  [CompactibleFreeListSpace::IndexSetSize];
+  static size_t _global_num_blocks [CompactibleFreeListSpace::IndexSetSize];
+  static int    _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
+  size_t        _num_blocks        [CompactibleFreeListSpace::IndexSetSize];
+
+  // Internal work method
+  void get_from_global_pool(size_t word_sz, FreeList* fl);
 
 public:
   CFLS_LAB(CompactibleFreeListSpace* cfls);
@@ -748,7 +750,12 @@
   HeapWord* alloc(size_t word_sz);
 
   // Return any unused portions of the buffer to the global pool.
-  void retire();
+  void retire(int tid);
+
+  // Dynamic OldPLABSize sizing
+  static void compute_desired_plab_size();
+  // When the settings are modified from default static initialization
+  static void modify_initialization(size_t n, unsigned wt);
 };
 
 size_t PromotionInfo::refillSize() const {
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -253,7 +253,6 @@
   }
 }
 
-
 void ConcurrentMarkSweepGeneration::ref_processor_init() {
   assert(collector() != NULL, "no collector");
   collector()->ref_processor_init();
@@ -341,6 +340,14 @@
   _icms_duty_cycle = CMSIncrementalDutyCycle;
 }
 
+double CMSStats::cms_free_adjustment_factor(size_t free) const {
+  // TBD: CR 6909490
+  return 1.0;
+}
+
+void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
+}
+
 // If promotion failure handling is on use
 // the padded average size of the promotion for each
 // young generation collection.
@@ -361,7 +368,11 @@
 
     // Adjust by the safety factor.
     double cms_free_dbl = (double)cms_free;
-    cms_free_dbl = cms_free_dbl * (100.0 - CMSIncrementalSafetyFactor) / 100.0;
+    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
+    // Apply a further correction factor which tries to adjust
+    // for recent occurrence of concurrent mode failures.
+    cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
+    cms_free_dbl = cms_free_dbl * cms_adjustment;
 
     if (PrintGCDetails && Verbose) {
       gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
@@ -395,6 +406,8 @@
   // late.
   double work = cms_duration() + gc0_period();
   double deadline = time_until_cms_gen_full();
+  // If a concurrent mode failure occurred recently, we want to be
+  // more conservative and halve our expected time_until_cms_gen_full()
   if (work > deadline) {
     if (Verbose && PrintGCDetails) {
       gclog_or_tty->print(
@@ -556,7 +569,8 @@
   _should_unload_classes(false),
   _concurrent_cycles_since_last_unload(0),
   _roots_scanning_options(0),
-  _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
+  _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
+  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
 {
   if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
     ExplicitGCInvokesConcurrent = true;
@@ -709,7 +723,8 @@
 
   // Support for parallelizing survivor space rescan
   if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) {
-    size_t max_plab_samples = MaxNewSize/((SurvivorRatio+2)*MinTLABSize);
+    size_t max_plab_samples = cp->max_gen0_size()/
+                                ((SurvivorRatio+2)*MinTLABSize);
     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads);
     _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples);
     _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads);
@@ -772,7 +787,7 @@
   NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
   _gc_counters = new CollectorCounters("CMS", 1);
   _completed_initialization = true;
-  _sweep_timer.start();  // start of time
+  _inter_sweep_timer.start();  // start of time
 }
 
 const char* ConcurrentMarkSweepGeneration::name() const {
@@ -899,6 +914,14 @@
   return result;
 }
 
+// At a promotion failure dump information on block layout in heap
+// (cms old generation).
+void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
+  if (CMSDumpAtPromotionFailure) {
+    cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
+  }
+}
+
 CompactibleSpace*
 ConcurrentMarkSweepGeneration::first_compaction_space() const {
   return _cmsSpace;
@@ -1367,12 +1390,7 @@
 ConcurrentMarkSweepGeneration::
 par_promote_alloc_done(int thread_num) {
   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
-  ps->lab.retire();
-#if CFLS_LAB_REFILL_STATS
-  if (thread_num == 0) {
-    _cmsSpace->print_par_alloc_stats();
-  }
-#endif
+  ps->lab.retire(thread_num);
 }
 
 void
@@ -1973,11 +1991,14 @@
   // We must adjust the allocation statistics being maintained
   // in the free list space. We do so by reading and clearing
   // the sweep timer and updating the block flux rate estimates below.
-  assert(_sweep_timer.is_active(), "We should never see the timer inactive");
-  _sweep_timer.stop();
-  // Note that we do not use this sample to update the _sweep_estimate.
-  _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
-                                          _sweep_estimate.padded_average());
+  assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
+  if (_inter_sweep_timer.is_active()) {
+    _inter_sweep_timer.stop();
+    // Note that we do not use this sample to update the _inter_sweep_estimate.
+    _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
+                                            _inter_sweep_estimate.padded_average(),
+                                            _intra_sweep_estimate.padded_average());
+  }
 
   GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
     ref_processor(), clear_all_soft_refs);
@@ -2014,10 +2035,10 @@
   }
 
   // Adjust the per-size allocation stats for the next epoch.
-  _cmsGen->cmsSpace()->endSweepFLCensus(sweepCount() /* fake */);
-  // Restart the "sweep timer" for next epoch.
-  _sweep_timer.reset();
-  _sweep_timer.start();
+  _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
+  // Restart the "inter sweep timer" for the next epoch.
+  _inter_sweep_timer.reset();
+  _inter_sweep_timer.start();
 
   // Sample collection pause time and reset for collection interval.
   if (UseAdaptiveSizePolicy) {
@@ -2675,7 +2696,7 @@
   // Also reset promotion tracking in par gc thread states.
   if (ParallelGCThreads > 0) {
     for (uint i = 0; i < ParallelGCThreads; i++) {
-      _par_gc_thread_states[i]->promo.stopTrackingPromotions();
+      _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
     }
   }
 }
@@ -2770,7 +2791,7 @@
   bool do_bit(size_t offset) {
     HeapWord* addr = _marks->offsetToHeapWord(offset);
     if (!_marks->isMarked(addr)) {
-      oop(addr)->print();
+      oop(addr)->print_on(gclog_or_tty);
       gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
       _failed = true;
     }
@@ -2819,7 +2840,7 @@
   // Clear any marks from a previous round
   verification_mark_bm()->clear_all();
   assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
-  assert(overflow_list_is_empty(), "overflow list should be empty");
+  verify_work_stacks_empty();
 
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
@@ -2892,8 +2913,8 @@
   verification_mark_bm()->iterate(&vcl);
   if (vcl.failed()) {
     gclog_or_tty->print("Verification failed");
-    Universe::heap()->print();
-    fatal(" ... aborting");
+    Universe::heap()->print_on(gclog_or_tty);
+    fatal("CMS: failed marking verification after remark");
   }
 }
 
@@ -3313,7 +3334,7 @@
     Universe::heap()->barrier_set()->resize_covered_region(mr);
     // Hmmmm... why doesn't CFLS::set_end verify locking?
     // This is quite ugly; FIX ME XXX
-    _cmsSpace->assert_locked();
+    _cmsSpace->assert_locked(freelistLock());
     _cmsSpace->set_end((HeapWord*)_virtual_space.high());
 
     // update the space and generation capacity counters
@@ -3634,9 +3655,7 @@
   verify_work_stacks_empty();
   verify_overflow_empty();
   assert(_revisitStack.isEmpty(), "tabula rasa");
-
-  DEBUG_ONLY(RememberKlassesChecker cmx(CMSClassUnloadingEnabled);)
-
+  DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
   bool result = false;
   if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) {
     result = do_marking_mt(asynch);
@@ -4103,7 +4122,6 @@
 void CMSConcMarkingTask::coordinator_yield() {
   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
          "CMS thread should hold CMS token");
-
   DEBUG_ONLY(RememberKlassesChecker mux(false);)
   // First give up the locks, then yield, then re-lock
   // We should probably use a constructor/destructor idiom to
@@ -4180,9 +4198,7 @@
   // Mutate the Refs discovery so it is MT during the
   // multi-threaded marking phase.
   ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1);
-
-  DEBUG_ONLY(RememberKlassesChecker cmx(CMSClassUnloadingEnabled);)
-
+  DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
   conc_workers()->start_task(&tsk);
   while (tsk.yielded()) {
     tsk.coordinator_yield();
@@ -4451,7 +4467,7 @@
     // for cleaner interfaces.
     rp->preclean_discovered_references(
           rp->is_alive_non_header(), &keep_alive, &complete_trace,
-          &yield_cl);
+          &yield_cl, should_unload_classes());
   }
 
   if (clean_survivor) {  // preclean the active survivor space(s)
@@ -4473,7 +4489,7 @@
     SurvivorSpacePrecleanClosure
       sss_cl(this, _span, &_markBitMap, &_markStack,
              &pam_cl, before_count, CMSYield);
-    DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
+    DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());)
     dng->from()->object_iterate_careful(&sss_cl);
     dng->to()->object_iterate_careful(&sss_cl);
   }
@@ -4644,7 +4660,7 @@
         verify_work_stacks_empty();
         verify_overflow_empty();
         sample_eden();
-        DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
+        DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());)
         stop_point =
           gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
       }
@@ -4732,7 +4748,7 @@
       sample_eden();
       verify_work_stacks_empty();
       verify_overflow_empty();
-      DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
+      DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());)
       HeapWord* stop_point =
         gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
       if (stop_point != NULL) {
@@ -4832,7 +4848,7 @@
   assert(haveFreelistLocks(), "must have free list locks");
   assert_lock_strong(bitMapLock());
 
-  DEBUG_ONLY(RememberKlassesChecker fmx(CMSClassUnloadingEnabled);)
+  DEBUG_ONLY(RememberKlassesChecker fmx(should_unload_classes());)
   if (!init_mark_was_synchronous) {
     // We might assume that we need not fill TLAB's when
     // CMSScavengeBeforeRemark is set, because we may have just done
@@ -5867,9 +5883,9 @@
   check_correct_thread_executing();
   verify_work_stacks_empty();
   verify_overflow_empty();
-  incrementSweepCount();
-  _sweep_timer.stop();
-  _sweep_estimate.sample(_sweep_timer.seconds());
+  increment_sweep_count();
+  _inter_sweep_timer.stop();
+  _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
   size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
 
   // PermGen verification support: If perm gen sweeping is disabled in
@@ -5892,6 +5908,9 @@
     }
   }
 
+  assert(!_intra_sweep_timer.is_active(), "Should not be active");
+  _intra_sweep_timer.reset();
+  _intra_sweep_timer.start();
   if (asynch) {
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
     CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
@@ -5936,8 +5955,11 @@
   verify_work_stacks_empty();
   verify_overflow_empty();
 
-  _sweep_timer.reset();
-  _sweep_timer.start();
+  _intra_sweep_timer.stop();
+  _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
+
+  _inter_sweep_timer.reset();
+  _inter_sweep_timer.start();
 
   update_time_of_last_gc(os::javaTimeMillis());
 
@@ -5980,11 +6002,11 @@
 // FIX ME!!! Looks like this belongs in CFLSpace, with
 // CMSGen merely delegating to it.
 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
-  double nearLargestPercent = 0.999;
+  double nearLargestPercent = FLSLargestBlockCoalesceProximity;
   HeapWord*  minAddr        = _cmsSpace->bottom();
   HeapWord*  largestAddr    =
     (HeapWord*) _cmsSpace->dictionary()->findLargestDict();
-  if (largestAddr == 0) {
+  if (largestAddr == NULL) {
     // The dictionary appears to be empty.  In this case
     // try to coalesce at the end of the heap.
     largestAddr = _cmsSpace->end();
@@ -5992,6 +6014,13 @@
   size_t largestOffset     = pointer_delta(largestAddr, minAddr);
   size_t nearLargestOffset =
     (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
+  if (PrintFLSStatistics != 0) {
+    gclog_or_tty->print_cr(
+      "CMS: Large Block: " PTR_FORMAT ";"
+      " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
+      largestAddr,
+      _cmsSpace->nearLargestChunk(), minAddr + nearLargestOffset);
+  }
   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
 }
 
@@ -6071,9 +6100,11 @@
   assert_lock_strong(gen->freelistLock());
   assert_lock_strong(bitMapLock());
 
-  assert(!_sweep_timer.is_active(), "Was switched off in an outer context");
-  gen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
-                                      _sweep_estimate.padded_average());
+  assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
+  assert(_intra_sweep_timer.is_active(),  "Was switched on  in an outer context");
+  gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
+                                      _inter_sweep_estimate.padded_average(),
+                                      _intra_sweep_estimate.padded_average());
   gen->setNearLargestChunk();
 
   {
@@ -6086,7 +6117,7 @@
     // end-of-sweep-census below will be off by a little bit.
   }
   gen->cmsSpace()->sweep_completed();
-  gen->cmsSpace()->endSweepFLCensus(sweepCount());
+  gen->cmsSpace()->endSweepFLCensus(sweep_count());
   if (should_unload_classes()) {                // unloaded classes this cycle,
     _concurrent_cycles_since_last_unload = 0;   // ... reset count
   } else {                                      // did not unload classes,
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -355,6 +355,11 @@
                                              unsigned int new_duty_cycle);
   unsigned int icms_update_duty_cycle_impl();
 
+  // In support of adjusting of cms trigger ratios based on history
+  // of concurrent mode failure.
+  double cms_free_adjustment_factor(size_t free) const;
+  void   adjust_cms_free_adjustment_factor(bool fail, size_t free);
+
  public:
   CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
            unsigned int alpha = CMSExpAvgFactor);
@@ -570,8 +575,11 @@
   // appropriately.
   void check_gc_time_limit();
   // XXX Move these to CMSStats ??? FIX ME !!!
-  elapsedTimer _sweep_timer;
-  AdaptivePaddedAverage _sweep_estimate;
+  elapsedTimer _inter_sweep_timer;   // time between sweeps
+  elapsedTimer _intra_sweep_timer;   // time _in_ sweeps
+  // padded decaying average estimates of the above
+  AdaptivePaddedAverage _inter_sweep_estimate;
+  AdaptivePaddedAverage _intra_sweep_estimate;
 
  protected:
   ConcurrentMarkSweepGeneration* _cmsGen;  // old gen (CMS)
@@ -625,6 +633,7 @@
   // . _collectorState <= Idling ==  post-sweep && pre-mark
   // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
   //                                            precleaning || abortablePreclean
+ public:
   enum CollectorState {
     Resizing            = 0,
     Resetting           = 1,
@@ -636,6 +645,7 @@
     FinalMarking        = 7,
     Sweeping            = 8
   };
+ protected:
   static CollectorState _collectorState;
 
   // State related to prologue/epilogue invocation for my generations
@@ -655,7 +665,7 @@
 
   int    _numYields;
   size_t _numDirtyCards;
-  uint   _sweepCount;
+  size_t _sweep_count;
   // number of full gc's since the last concurrent gc.
   uint   _full_gcs_since_conc_gc;
 
@@ -905,7 +915,7 @@
 
   // Check that the currently executing thread is the expected
   // one (foreground collector or background collector).
-  void check_correct_thread_executing()        PRODUCT_RETURN;
+  static void check_correct_thread_executing() PRODUCT_RETURN;
   // XXXPERM void print_statistics()           PRODUCT_RETURN;
 
   bool is_cms_reachable(HeapWord* addr);
@@ -930,8 +940,8 @@
   static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
   static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
   static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
-  uint  sweepCount() const             { return _sweepCount; }
-  void incrementSweepCount()           { _sweepCount++; }
+  size_t sweep_count() const             { return _sweep_count; }
+  void   increment_sweep_count()         { _sweep_count++; }
 
   // Timers/stats for gc scheduling and incremental mode pacing.
   CMSStats& stats() { return _stats; }
@@ -1165,6 +1175,11 @@
   virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
     bool younger_handles_promotion_failure) const;
 
+  // Inform this (non-young) generation that a promotion failure was
+  // encountered during a collection of a younger generation that
+  // promotes into this generation.
+  virtual void promotion_failure_occurred();
+
   bool should_collect(bool full, size_t size, bool tlab);
   virtual bool should_concurrent_collect() const;
   virtual bool is_too_full() const;
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -55,7 +55,8 @@
   virtual void       dictCensusUpdate(size_t size, bool split, bool birth) = 0;
   virtual bool       coalDictOverPopulated(size_t size) = 0;
   virtual void       beginSweepDictCensus(double coalSurplusPercent,
-                       float sweep_current, float sweep_ewstimate) = 0;
+                       float inter_sweep_current, float inter_sweep_estimate,
+                       float intra_sweep_estimate) = 0;
   virtual void       endSweepDictCensus(double splitSurplusPercent) = 0;
   virtual FreeChunk* findLargestDict() const = 0;
   // verify that the given chunk is in the dictionary.
@@ -79,6 +80,7 @@
   }
 
   virtual void       printDictCensus() const = 0;
+  virtual void       print_free_lists(outputStream* st) const = 0;
 
   virtual void       verify()         const = 0;
 
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -67,3 +67,8 @@
   }
 }
 #endif
+
+void FreeChunk::print_on(outputStream* st) {
+  st->print_cr("Next: " PTR_FORMAT " Prev: " PTR_FORMAT " %s",
+    next(), prev(), cantCoalesce() ? "[can't coalesce]" : "");
+}
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -129,6 +129,8 @@
   void verifyList()         const PRODUCT_RETURN;
   void mangleAllocated(size_t size) PRODUCT_RETURN;
   void mangleFreed(size_t size)     PRODUCT_RETURN;
+
+  void print_on(outputStream* st);
 };
 
 // Alignment helpers etc.
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -81,8 +81,8 @@
   set_hint(hint);
 }
 
-void FreeList::init_statistics() {
-  _allocation_stats.initialize();
+void FreeList::init_statistics(bool split_birth) {
+  _allocation_stats.initialize(split_birth);
 }
 
 FreeChunk* FreeList::getChunkAtHead() {
@@ -292,14 +292,31 @@
 }
 
 #ifndef PRODUCT
+void FreeList::verify_stats() const {
+  // The +1 of the LH comparand is to allow some "looseness" in
+  // checking: we usually call this interface when adding a block
+  // and we'll subsequently update the stats; we cannot update the
+  // stats beforehand because in the case of the large-block BT
+  // dictionary for example, this might be the first block and
+  // in that case there would be no place that we could record
+  // the stats (which are kept in the block itself).
+  assert(_allocation_stats.prevSweep() + _allocation_stats.splitBirths() + 1   // Total Stock + 1
+          >= _allocation_stats.splitDeaths() + (ssize_t)count(), "Conservation Principle");
+}
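// Illustrative sketch (not part of this changeset): the "Conservation
// Principle" above as plain arithmetic. The stock recorded at the
// previous sweep plus births since then must cover the deaths plus what
// is still on the list, with +1 of slack for a block added just before
// its birth is recorded.
static bool stats_conserved_sketch(long prev_sweep_stock, long split_births,
                                   long split_deaths, long on_list_now) {
  return prev_sweep_stock + split_births + 1 >= split_deaths + on_list_now;
}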
+
 void FreeList::assert_proper_lock_protection_work() const {
-#ifdef ASSERT
-  if (_protecting_lock != NULL &&
-      SharedHeap::heap()->n_par_threads() > 0) {
-    // Should become an assert.
-    guarantee(_protecting_lock->owned_by_self(), "FreeList RACE DETECTED");
+  assert(_protecting_lock != NULL, "Don't call this directly");
+  assert(ParallelGCThreads > 0, "Don't call this directly");
+  Thread* thr = Thread::current();
+  if (thr->is_VM_thread() || thr->is_ConcurrentGC_thread()) {
+    // assert that we are holding the freelist lock
+  } else if (thr->is_GC_task_thread()) {
+    assert(_protecting_lock->owned_by_self(), "FreeList RACE DETECTED");
+  } else if (thr->is_Java_thread()) {
+    assert(!SafepointSynchronize::is_at_safepoint(), "Should not be executing");
+  } else {
+    ShouldNotReachHere();  // unaccounted thread type?
   }
-#endif
 }
 #endif
 
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -35,18 +35,26 @@
 // for that implementation.
 
 class Mutex;
+class TreeList;
 
 class FreeList VALUE_OBJ_CLASS_SPEC {
   friend class CompactibleFreeListSpace;
   friend class VMStructs;
-  friend class printTreeCensusClosure;
-  FreeChunk*    _head;          // List of free chunks
+  friend class PrintTreeCensusClosure;
+
+ protected:
+  TreeList* _parent;
+  TreeList* _left;
+  TreeList* _right;
+
+ private:
+  FreeChunk*    _head;          // Head of list of free chunks
   FreeChunk*    _tail;          // Tail of list of free chunks
-  size_t        _size;          // Size in Heap words of each chunks
+  size_t        _size;          // Size in Heap words of each chunk
   ssize_t       _count;         // Number of entries in list
   size_t        _hint;          // next larger size list with a positive surplus
 
-  AllocationStats _allocation_stats;            // statistics for smart allocation
+  AllocationStats _allocation_stats; // allocation-related statistics
 
 #ifdef ASSERT
   Mutex*        _protecting_lock;
@@ -63,9 +71,12 @@
 
   // Initialize the allocation statistics.
  protected:
-  void init_statistics();
+  void init_statistics(bool split_birth = false);
   void set_count(ssize_t v) { _count = v;}
-  void increment_count()    { _count++; }
+  void increment_count()    {
+    _count++;
+  }
+
   void decrement_count() {
     _count--;
     assert(_count >= 0, "Count should not be negative");
@@ -167,11 +178,13 @@
     _allocation_stats.set_desired(v);
   }
   void compute_desired(float inter_sweep_current,
-                       float inter_sweep_estimate) {
+                       float inter_sweep_estimate,
+                       float intra_sweep_estimate) {
     assert_proper_lock_protection();
     _allocation_stats.compute_desired(_count,
                                       inter_sweep_current,
-                                      inter_sweep_estimate);
+                                      inter_sweep_estimate,
+                                      intra_sweep_estimate);
   }
   ssize_t coalDesired() const {
     return _allocation_stats.coalDesired();
@@ -306,6 +319,9 @@
   // found.  Return NULL if "fc" is not found.
   bool verifyChunkInFreeLists(FreeChunk* fc) const;
 
+  // Stats verification
+  void verify_stats() const PRODUCT_RETURN;
+
   // Printing support
   static void print_labels_on(outputStream* st, const char* c);
   void print_on(outputStream* st, const char* c = NULL) const;
--- a/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -351,9 +351,16 @@
 CollectionSetChooser::printSortedHeapRegions() {
   gclog_or_tty->print_cr("Printing %d Heap Regions sorted by amount of known garbage",
                 _numMarkedRegions);
+
+  DEBUG_ONLY(int marked_count = 0;)
   for (int i = 0; i < _markedRegions.length(); i++) {
-    printHeapRegion(_markedRegions.at(i));
+    HeapRegion* r = _markedRegions.at(i);
+    if (r != NULL) {
+      printHeapRegion(r);
+      DEBUG_ONLY(marked_count++;)
+    }
   }
+  assert(marked_count == _numMarkedRegions, "must be");
   gclog_or_tty->print_cr("Done sorted heap region print");
 }
 
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -42,28 +42,49 @@
   _n_periods(0),
   _threads(NULL), _n_threads(0)
 {
-  if (G1ConcRefine) {
-    _n_threads = (int)thread_num();
-    if (_n_threads > 0) {
-      _threads = NEW_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _n_threads);
-      int worker_id_offset = (int)DirtyCardQueueSet::num_par_ids();
-      ConcurrentG1RefineThread *next = NULL;
-      for (int i = _n_threads - 1; i >= 0; i--) {
-        ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, worker_id_offset, i);
-        assert(t != NULL, "Conc refine should have been created");
-        assert(t->cg1r() == this, "Conc refine thread should refer to this");
-        _threads[i] = t;
-        next = t;
-      }
-    }
+
+  // Ergonomically select initial concurrent refinement parameters
+  if (FLAG_IS_DEFAULT(G1ConcRefineGreenZone)) {
+    FLAG_SET_DEFAULT(G1ConcRefineGreenZone, MAX2<int>(ParallelGCThreads, 1));
+  }
+  set_green_zone(G1ConcRefineGreenZone);
+
+  if (FLAG_IS_DEFAULT(G1ConcRefineYellowZone)) {
+    FLAG_SET_DEFAULT(G1ConcRefineYellowZone, green_zone() * 3);
+  }
+  set_yellow_zone(MAX2<int>(G1ConcRefineYellowZone, green_zone()));
+
+  if (FLAG_IS_DEFAULT(G1ConcRefineRedZone)) {
+    FLAG_SET_DEFAULT(G1ConcRefineRedZone, yellow_zone() * 2);
+  }
+  set_red_zone(MAX2<int>(G1ConcRefineRedZone, yellow_zone()));
+  _n_worker_threads = thread_num();
+  // We need one extra thread to do the young gen rset size sampling.
+  _n_threads = _n_worker_threads + 1;
+  reset_threshold_step();
+
+  _threads = NEW_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _n_threads);
+  int worker_id_offset = (int)DirtyCardQueueSet::num_par_ids();
+  ConcurrentG1RefineThread *next = NULL;
+  for (int i = _n_threads - 1; i >= 0; i--) {
+    ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, worker_id_offset, i);
+    assert(t != NULL, "Conc refine should have been created");
+    assert(t->cg1r() == this, "Conc refine thread should refer to this");
+    _threads[i] = t;
+    next = t;
   }
 }
 
-size_t ConcurrentG1Refine::thread_num() {
-  if (G1ConcRefine) {
-    return (G1ParallelRSetThreads > 0) ? G1ParallelRSetThreads : ParallelGCThreads;
+void ConcurrentG1Refine::reset_threshold_step() {
+  if (FLAG_IS_DEFAULT(G1ConcRefineThresholdStep)) {
+    _thread_threshold_step = (yellow_zone() - green_zone()) / (worker_thread_num() + 1);
+  } else {
+    _thread_threshold_step = G1ConcRefineThresholdStep;
   }
-  return 0;
+}
+
+int ConcurrentG1Refine::thread_num() {
+  return MAX2<int>((G1ParallelRSetThreads > 0) ? G1ParallelRSetThreads : ParallelGCThreads, 1);
 }
 
 void ConcurrentG1Refine::init() {
@@ -123,6 +144,15 @@
   }
 }
 
+void ConcurrentG1Refine::reinitialize_threads() {
+  reset_threshold_step();
+  if (_threads != NULL) {
+    for (int i = 0; i < _n_threads; i++) {
+      _threads[i]->initialize();
+    }
+  }
+}
+
 ConcurrentG1Refine::~ConcurrentG1Refine() {
   if (G1ConcRSLogCacheSize > 0) {
     assert(_card_counts != NULL, "Logic");
@@ -270,7 +300,23 @@
   int count;
   jbyte* cached_ptr = add_card_count(card_ptr, &count, defer);
   assert(cached_ptr != NULL, "bad cached card ptr");
-  assert(!is_young_card(cached_ptr), "shouldn't get a card in young region");
+
+  if (is_young_card(cached_ptr)) {
+    // The region containing cached_ptr has been freed during a clean up
+    // pause, reallocated, and tagged as young.
+    assert(cached_ptr != card_ptr, "shouldn't be");
+
+    // We've just inserted a new old-gen card pointer into the card count
+    // cache and evicted the previous contents of that count slot.
+    // The evicted card pointer has been determined to be in a young region
+    // and so cannot be the newly inserted card pointer (that will be
+    // in an old region).
+    // The count for the newly inserted card will be set to zero during the
+    // insertion, so we don't want to defer the cleaning of the newly
+    // inserted card pointer.
+    assert(*defer == false, "deferring non-hot card");
+    return NULL;
+  }
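// Illustrative sketch (not part of this changeset): a single-slot model
// of the eviction behaviour the comment above relies on. Re-inserting
// the same card bumps its count, while a new card resets the slot and
// hands back the old occupant, which may by now lie in a region that
// was freed and re-tagged as young.
struct CountSlotSketch { const void* card; int count; };

static const void* insert_and_evict_sketch(CountSlotSketch* s,
                                           const void* card, int* count) {
  if (s->card == card) {
    *count = ++s->count;  // same card again: it is getting hot
    return card;          // cached_ptr == card_ptr
  }
  const void* evicted = s->card;
  s->card  = card;
  s->count = 0;           // newly inserted card starts cold
  *count   = 0;           // so its cleaning is never deferred
  return evicted;         // only this can be a young-region card
}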
 
   // The card pointer we obtained from card count cache is not hot
   // so do not store it in the cache; return it for immediate
@@ -384,4 +430,3 @@
     st->cr();
   }
 }
-
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -29,6 +29,31 @@
 class ConcurrentG1Refine: public CHeapObj {
   ConcurrentG1RefineThread** _threads;
   int _n_threads;
+  int _n_worker_threads;
+ /*
+  * The value of the update buffer queue length falls into one of 3 zones:
+  * green, yellow, red. If the value is in [0, green), nothing is done and
+  * the buffers are left unprocessed, to enable the caching effect of the
+  * dirtied cards. In the yellow zone [green, yellow) the concurrent refinement
+  * threads are gradually activated. In [yellow, red) all threads are
+  * running. If the length becomes red (reaches the max queue length), the
+  * mutators start processing the buffers themselves.
+  *
+  * There are some interesting cases (with G1AdaptiveConcRefine turned off):
+  * 1) green = yellow = red = 0. In this case the mutator will process all
+  *    buffers, except for those created by the deferred updates
+  *    machinery during a collection.
+  * 2) green = 0. This means no caching; it can be a good way to minimize the
+  *    amount of time spent updating rsets during a collection.
+  */
+  int _green_zone;
+  int _yellow_zone;
+  int _red_zone;
+
+  int _thread_threshold_step;
+
+  // Reset the threshold step value based on the current zone boundaries.
+  void reset_threshold_step();
 
   // The cache for card refinement.
   bool   _use_cache;
@@ -147,6 +172,8 @@
   void init(); // Accomplish some initialization that has to wait.
   void stop();
 
+  void reinitialize_threads();
+
   // Iterate over the conc refine threads
   void threads_do(ThreadClosure *tc);
 
@@ -178,7 +205,20 @@
 
   void clear_and_record_card_counts();
 
-  static size_t thread_num();
+  static int thread_num();
 
   void print_worker_threads_on(outputStream* st) const;
+
+  void set_green_zone(int x)  { _green_zone = x;  }
+  void set_yellow_zone(int x) { _yellow_zone = x; }
+  void set_red_zone(int x)    { _red_zone = x;    }
+
+  int green_zone() const      { return _green_zone;  }
+  int yellow_zone() const     { return _yellow_zone; }
+  int red_zone() const        { return _red_zone;    }
+
+  int total_thread_num() const  { return _n_threads;        }
+  int worker_thread_num() const { return _n_worker_threads; }
+
+  int thread_threshold_step() const { return _thread_threshold_step; }
 };
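// Illustrative sketch (not part of this changeset): the ergonomic zone
// defaults chosen in the ConcurrentG1Refine constructor. Green defaults
// to the number of parallel GC threads (at least 1), yellow to 3x green,
// red to 2x yellow, each clamped to be no smaller than the zone below
// it. Plain values stand in for the G1ConcRefine* flags.
#include <algorithm>

struct RefineZonesSketch { int green, yellow, red; };

static RefineZonesSketch default_zones_sketch(int parallel_gc_threads) {
  RefineZonesSketch z;
  z.green  = std::max(parallel_gc_threads, 1);
  z.yellow = std::max(3 * z.green, z.green);
  z.red    = std::max(2 * z.yellow, z.yellow);
  return z;
}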
--- a/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -25,10 +25,6 @@
 #include "incls/_precompiled.incl"
 #include "incls/_concurrentG1RefineThread.cpp.incl"
 
-// ======= Concurrent Mark Thread ========
-
-// The CM thread is created when the G1 garbage collector is used
-
 ConcurrentG1RefineThread::
 ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *next,
                          int worker_id_offset, int worker_id) :
@@ -37,19 +33,42 @@
   _worker_id(worker_id),
   _active(false),
   _next(next),
+  _monitor(NULL),
   _cg1r(cg1r),
-  _vtime_accum(0.0),
-  _interval_ms(5.0)
+  _vtime_accum(0.0)
 {
+
+  // Each thread has its own monitor. The i-th thread is responsible for signalling
+  // to thread i+1 if the number of buffers in the queue exceeds a threshold for this
+  // thread. Monitors are also used to wake up the threads during termination.
+  // The 0th worker is notified by mutator threads and has a special monitor.
+  // The last worker is used for young gen rset size sampling.
+  if (worker_id > 0) {
+    _monitor = new Monitor(Mutex::nonleaf, "Refinement monitor", true);
+  } else {
+    _monitor = DirtyCardQ_CBL_mon;
+  }
+  initialize();
   create_and_start();
 }
 
+void ConcurrentG1RefineThread::initialize() {
+  if (_worker_id < cg1r()->worker_thread_num()) {
+    // Current thread activation threshold
+    _threshold = MIN2<int>(cg1r()->thread_threshold_step() * (_worker_id + 1) + cg1r()->green_zone(),
+                           cg1r()->yellow_zone());
+    // A thread deactivates once the number of buffers falls to its deactivation threshold.
+    _deactivation_threshold = MAX2<int>(_threshold - cg1r()->thread_threshold_step(), cg1r()->green_zone());
+  } else {
+    set_active(true);
+  }
+}
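// Illustrative sketch (not part of this changeset): the stepped
// thresholds computed in initialize() above. Worker i activates once
// the queue exceeds green + (i+1)*step (capped at yellow) and
// deactivates one step below that (floored at green), so workers switch
// on and off in a staircase across the yellow zone.
#include <algorithm>

struct ThresholdsSketch { int activate, deactivate; };

static ThresholdsSketch worker_thresholds_sketch(int worker_id, int green,
                                                 int yellow, int step) {
  ThresholdsSketch t;
  t.activate   = std::min(step * (worker_id + 1) + green, yellow);
  t.deactivate = std::max(t.activate - step, green);
  return t;
}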
+
 void ConcurrentG1RefineThread::sample_young_list_rs_lengths() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   G1CollectorPolicy* g1p = g1h->g1_policy();
   if (g1p->adaptive_young_list_length()) {
     int regions_visited = 0;
-
     g1h->young_list_rs_length_sampling_init();
     while (g1h->young_list_rs_length_sampling_more()) {
       g1h->young_list_rs_length_sampling_next();
@@ -70,99 +89,121 @@
   }
 }
 
-void ConcurrentG1RefineThread::run() {
-  initialize_in_thread();
+void ConcurrentG1RefineThread::run_young_rs_sampling() {
+  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   _vtime_start = os::elapsedVTime();
-  wait_for_universe_init();
+  while (!_should_terminate) {
+    _sts.join();
+    sample_young_list_rs_lengths();
+    _sts.leave();
 
-  while (!_should_terminate) {
-    DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
-    // Wait for completed log buffers to exist.
-    {
-      MutexLockerEx x(DirtyCardQ_CBL_mon, Mutex::_no_safepoint_check_flag);
-      while (((_worker_id == 0 && !dcqs.process_completed_buffers()) ||
-              (_worker_id > 0 && !is_active())) &&
-             !_should_terminate) {
-         DirtyCardQ_CBL_mon->wait(Mutex::_no_safepoint_check_flag);
-      }
-    }
-
-    if (_should_terminate) {
-      return;
+    if (os::supports_vtime()) {
+      _vtime_accum = (os::elapsedVTime() - _vtime_start);
+    } else {
+      _vtime_accum = 0.0;
     }
 
-    // Now we take them off (this doesn't hold locks while it applies
-    // closures.)  (If we did a full collection, then we'll do a full
-    // traversal.
-    _sts.join();
-    int n_logs = 0;
-    int lower_limit = 0;
-    double start_vtime_sec; // only used when G1SmoothConcRefine is on
-    int prev_buffer_num; // only used when G1SmoothConcRefine is on
-    // This thread activation threshold
-    int threshold = G1UpdateBufferQueueProcessingThreshold * _worker_id;
-    // Next thread activation threshold
-    int next_threshold = threshold + G1UpdateBufferQueueProcessingThreshold;
-    int deactivation_threshold = MAX2<int>(threshold - G1UpdateBufferQueueProcessingThreshold / 2, 0);
+    MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
+    if (_should_terminate) {
+      break;
+    }
+    _monitor->wait(Mutex::_no_safepoint_check_flag, G1ConcRefineServiceInterval);
+  }
+}
 
-    if (G1SmoothConcRefine) {
-      lower_limit = 0;
-      start_vtime_sec = os::elapsedVTime();
-      prev_buffer_num = (int) dcqs.completed_buffers_num();
-    } else {
-      lower_limit = G1UpdateBufferQueueProcessingThreshold / 4; // For now.
+void ConcurrentG1RefineThread::wait_for_completed_buffers() {
+  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+  MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
+  while (!_should_terminate && !is_active()) {
+    _monitor->wait(Mutex::_no_safepoint_check_flag);
+  }
+}
+
+bool ConcurrentG1RefineThread::is_active() {
+  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+  return _worker_id > 0 ? _active : dcqs.process_completed_buffers();
+}
+
+void ConcurrentG1RefineThread::activate() {
+  MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
+  if (_worker_id > 0) {
+    if (G1TraceConcurrentRefinement) {
+      DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+      gclog_or_tty->print_cr("G1-Refine-activated worker %d, on threshold %d, current %d",
+                             _worker_id, _threshold, (int)dcqs.completed_buffers_num());
     }
-    while (dcqs.apply_closure_to_completed_buffer(_worker_id + _worker_id_offset, lower_limit)) {
-      double end_vtime_sec;
-      double elapsed_vtime_sec;
-      int elapsed_vtime_ms;
-      int curr_buffer_num = (int) dcqs.completed_buffers_num();
+    set_active(true);
+  } else {
+    DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+    dcqs.set_process_completed(true);
+  }
+  _monitor->notify();
+}
 
-      if (G1SmoothConcRefine) {
-        end_vtime_sec = os::elapsedVTime();
-        elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
-        elapsed_vtime_ms = (int) (elapsed_vtime_sec * 1000.0);
+void ConcurrentG1RefineThread::deactivate() {
+  MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
+  if (_worker_id > 0) {
+    if (G1TraceConcurrentRefinement) {
+      DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+      gclog_or_tty->print_cr("G1-Refine-deactivated worker %d, off threshold %d, current %d",
+                             _worker_id, _deactivation_threshold, (int)dcqs.completed_buffers_num());
+    }
+    set_active(false);
+  } else {
+    DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+    dcqs.set_process_completed(false);
+  }
+}
+
+void ConcurrentG1RefineThread::run() {
+  initialize_in_thread();
+  wait_for_universe_init();
 
-        if (curr_buffer_num > prev_buffer_num ||
-            curr_buffer_num > next_threshold) {
-          decreaseInterval(elapsed_vtime_ms);
-        } else if (curr_buffer_num < prev_buffer_num) {
-          increaseInterval(elapsed_vtime_ms);
-        }
+  if (_worker_id >= cg1r()->worker_thread_num()) {
+    run_young_rs_sampling();
+    terminate();
+    return; // don't fall through to the refinement loop below
+  }
+
+  _vtime_start = os::elapsedVTime();
+  while (!_should_terminate) {
+    DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+
+    // Wait for work
+    wait_for_completed_buffers();
+
+    if (_should_terminate) {
+      break;
+    }
+
+    _sts.join();
+
+    do {
+      int curr_buffer_num = (int)dcqs.completed_buffers_num();
+      // If the number of completed buffers falls into the yellow zone,
+      // the transition period after the evacuation pause has ended.
+      if (dcqs.completed_queue_padding() > 0 && curr_buffer_num <= cg1r()->yellow_zone()) {
+        dcqs.set_completed_queue_padding(0);
       }
-      if (_worker_id == 0) {
-        sample_young_list_rs_lengths();
-      } else if (curr_buffer_num < deactivation_threshold) {
+
+      if (_worker_id > 0 && curr_buffer_num <= _deactivation_threshold) {
         // If the number of buffers has fallen below our threshold
         // we should deactivate. The predecessor will reactivate this
         // thread should the number of buffers cross the threshold again.
-        MutexLockerEx x(DirtyCardQ_CBL_mon, Mutex::_no_safepoint_check_flag);
         deactivate();
-        if (G1TraceConcurrentRefinement) {
-          gclog_or_tty->print_cr("G1-Refine-deactivated worker %d", _worker_id);
-        }
         break;
       }
 
       // Check if we need to activate the next thread.
-      if (curr_buffer_num > next_threshold && _next != NULL && !_next->is_active()) {
-        MutexLockerEx x(DirtyCardQ_CBL_mon, Mutex::_no_safepoint_check_flag);
+      if (_next != NULL && !_next->is_active() && curr_buffer_num > _next->_threshold) {
         _next->activate();
-        DirtyCardQ_CBL_mon->notify_all();
-        if (G1TraceConcurrentRefinement) {
-          gclog_or_tty->print_cr("G1-Refine-activated worker %d", _next->_worker_id);
-        }
       }
+    } while (dcqs.apply_closure_to_completed_buffer(_worker_id + _worker_id_offset, cg1r()->green_zone()));
 
-      if (G1SmoothConcRefine) {
-        prev_buffer_num = curr_buffer_num;
-        _sts.leave();
-        os::sleep(Thread::current(), (jlong) _interval_ms, false);
-        _sts.join();
-        start_vtime_sec = os::elapsedVTime();
-      }
-      n_logs++;
+    // We can exit the loop above while being active if there was a yield request.
+    if (is_active()) {
+      deactivate();
     }
+
     _sts.leave();
 
     if (os::supports_vtime()) {
@@ -172,7 +214,6 @@
     }
   }
   assert(_should_terminate, "just checking");
-
   terminate();
 }
 
@@ -191,8 +232,8 @@
   }
 
   {
-    MutexLockerEx x(DirtyCardQ_CBL_mon, Mutex::_no_safepoint_check_flag);
-    DirtyCardQ_CBL_mon->notify_all();
+    MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
+    _monitor->notify();
   }
 
   {
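
The patch replaces the shared DirtyCardQ_CBL_mon handshake with a per-thread _monitor: each refinement worker blocks on its own monitor in wait_for_completed_buffers(), and its predecessor wakes it via activate(). Below is a minimal standalone model of that protocol, a sketch only: it uses standard C++ primitives in place of HotSpot's Monitor/MutexLockerEx, and all type and variable names are invented.

#include <condition_variable>
#include <mutex>
#include <thread>
#include <cstdio>

struct RefineThreadModel {
  std::mutex m;                    // stands in for the lock behind _monitor
  std::condition_variable cv;      // stands in for _monitor wait/notify
  bool active = false;
  bool should_terminate = false;

  void wait_for_work() {           // mirrors wait_for_completed_buffers()
    std::unique_lock<std::mutex> x(m);
    cv.wait(x, [&] { return active || should_terminate; });
  }
  void activate() {                // called by the predecessor thread
    { std::lock_guard<std::mutex> x(m); active = true; }
    cv.notify_one();
  }
};

int main() {
  RefineThreadModel t;
  std::thread worker([&] { t.wait_for_work(); std::printf("activated\n"); });
  t.activate();
  worker.join();
  return 0;
}

Because the wait uses a predicate, an activate() that arrives before the worker first blocks is not lost; the patched code gets the same guarantee by re-checking _should_terminate and is_active() under the monitor before each wait.
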
--- a/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -40,42 +40,36 @@
   // when the number of rset update buffers crosses a certain threshold. A successor
   // would self-deactivate when the number of buffers falls below the threshold.
   bool _active;
-  ConcurrentG1RefineThread *       _next;
- public:
-  virtual void run();
-
-  bool is_active()  { return _active;  }
-  void activate()   { _active = true;  }
-  void deactivate() { _active = false; }
-
- private:
-  ConcurrentG1Refine*              _cg1r;
-
-  double                           _interval_ms;
+  ConcurrentG1RefineThread* _next;
+  Monitor* _monitor;
+  ConcurrentG1Refine* _cg1r;
 
-  void decreaseInterval(int processing_time_ms) {
-    double min_interval_ms = (double) processing_time_ms;
-    _interval_ms = 0.8 * _interval_ms;
-    if (_interval_ms < min_interval_ms)
-      _interval_ms = min_interval_ms;
-  }
-  void increaseInterval(int processing_time_ms) {
-    double max_interval_ms = 9.0 * (double) processing_time_ms;
-    _interval_ms = 1.1 * _interval_ms;
-    if (max_interval_ms > 0 && _interval_ms > max_interval_ms)
-      _interval_ms = max_interval_ms;
-  }
+  int _thread_threshold_step;
+  // This thread activation threshold
+  int _threshold;
+  // This thread deactivation threshold
+  int _deactivation_threshold;
 
-  void sleepBeforeNextCycle();
+  void sample_young_list_rs_lengths();
+  void run_young_rs_sampling();
+  void wait_for_completed_buffers();
+
+  void set_active(bool x) { _active = x; }
+  bool is_active();
+  void activate();
+  void deactivate();
 
   // For use by G1CollectedHeap, which is a friend.
   static SuspendibleThreadSet* sts() { return &_sts; }
 
- public:
+public:
+  virtual void run();
   // Constructor
   ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread* next,
                            int worker_id_offset, int worker_id);
 
+  void initialize();
+
   // Printing
   void print() const;
   void print_on(outputStream* st) const;
@@ -83,13 +77,10 @@
   // Total virtual time so far.
   double vtime_accum() { return _vtime_accum; }
 
-  ConcurrentG1Refine* cg1r()                     { return _cg1r;     }
-
-  void            sample_young_list_rs_lengths();
+  ConcurrentG1Refine* cg1r() { return _cg1r;     }
 
   // Yield for GC
-  void            yield();
-
+  void yield();
   // shutdown
   void stop();
 };
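
The _threshold/_deactivation_threshold fields added above implement the staged wake-up described in the class comment: worker i activates only once the update-buffer queue outgrows its personal threshold, and parks again as the queue drains back toward the green zone. Here is a sketch of how the per-worker thresholds could be derived; the deactivation formula is copied from the first hunk of this change, while the activation formula (green zone plus one step per worker, capped at the yellow zone) and all scaffolding are assumptions.

#include <algorithm>
#include <cstdio>

// Invented scaffolding; only the deactivation formula below is taken
// verbatim from the patch.
struct ZoneConfig {
  int green_zone;    // buffers deliberately left for mutator-time refinement
  int yellow_zone;   // all workers refining past this point
  int step;          // thread_threshold_step()
};

int main() {
  ZoneConfig z = { 4, 12, 2 };   // illustrative values
  for (int worker_id = 1; worker_id <= 3; worker_id++) {
    // Assumed activation rule: one step above the previous worker.
    int threshold = std::min(z.green_zone + z.step * (worker_id + 1),
                             z.yellow_zone);
    // Deactivation rule as in the patch: one step of hysteresis,
    // never below the green zone.
    int deactivation_threshold = std::max(threshold - z.step, z.green_zone);
    std::printf("worker %d: activate above %d, deactivate at or below %d\n",
                worker_id, threshold, deactivation_threshold);
  }
  return 0;
}
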
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -760,7 +760,6 @@
   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
-  satb_mq_set.set_process_completed_threshold(G1SATBProcessCompletedThreshold);
   satb_mq_set.set_active_all_threads(true);
 
   // update_g1_committed() will be called at the end of an evac pause
--- a/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -61,8 +61,8 @@
 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 #endif // _MSC_VER
 
-DirtyCardQueueSet::DirtyCardQueueSet() :
-  PtrQueueSet(true /*notify_when_complete*/),
+DirtyCardQueueSet::DirtyCardQueueSet(bool notify_when_complete) :
+  PtrQueueSet(notify_when_complete),
   _closure(NULL),
   _shared_dirty_card_queue(this, true /*perm*/),
   _free_ids(NULL),
@@ -77,12 +77,12 @@
 }
 
 void DirtyCardQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
+                                   int process_completed_threshold,
                                    int max_completed_queue,
                                    Mutex* lock, PtrQueueSet* fl_owner) {
-  PtrQueueSet::initialize(cbl_mon, fl_lock, max_completed_queue, fl_owner);
+  PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold,
+                          max_completed_queue, fl_owner);
   set_buffer_size(G1UpdateBufferSize);
-  set_process_completed_threshold(G1UpdateBufferQueueProcessingThreshold);
-
   _shared_dirty_card_queue.set_lock(lock);
   _free_ids = new FreeIdSet((int) num_par_ids(), _cbl_mon);
 }
@@ -154,9 +154,10 @@
   return b;
 }
 
-DirtyCardQueueSet::CompletedBufferNode*
-DirtyCardQueueSet::get_completed_buffer_lock(int stop_at) {
-  CompletedBufferNode* nd = NULL;
+
+BufferNode*
+DirtyCardQueueSet::get_completed_buffer(int stop_at) {
+  BufferNode* nd = NULL;
   MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
 
   if ((int)_n_completed_buffers <= stop_at) {
@@ -166,53 +167,31 @@
 
   if (_completed_buffers_head != NULL) {
     nd = _completed_buffers_head;
-    _completed_buffers_head = nd->next;
+    _completed_buffers_head = nd->next();
     if (_completed_buffers_head == NULL)
       _completed_buffers_tail = NULL;
     _n_completed_buffers--;
+    assert(_n_completed_buffers >= 0, "Invariant");
   }
   debug_only(assert_completed_buffer_list_len_correct_locked());
   return nd;
 }
 
-// We only do this in contexts where there is no concurrent enqueueing.
-DirtyCardQueueSet::CompletedBufferNode*
-DirtyCardQueueSet::get_completed_buffer_CAS() {
-  CompletedBufferNode* nd = _completed_buffers_head;
-
-  while (nd != NULL) {
-    CompletedBufferNode* next = nd->next;
-    CompletedBufferNode* result =
-      (CompletedBufferNode*)Atomic::cmpxchg_ptr(next,
-                                                &_completed_buffers_head,
-                                                nd);
-    if (result == nd) {
-      return result;
-    } else {
-      nd = _completed_buffers_head;
-    }
-  }
-  assert(_completed_buffers_head == NULL, "Loop post");
-  _completed_buffers_tail = NULL;
-  return NULL;
-}
-
 bool DirtyCardQueueSet::
 apply_closure_to_completed_buffer_helper(int worker_i,
-                                         CompletedBufferNode* nd) {
+                                         BufferNode* nd) {
   if (nd != NULL) {
+    void **buf = BufferNode::make_buffer_from_node(nd);
+    size_t index = nd->index();
     bool b =
-      DirtyCardQueue::apply_closure_to_buffer(_closure, nd->buf,
-                                              nd->index, _sz,
+      DirtyCardQueue::apply_closure_to_buffer(_closure, buf,
+                                              index, _sz,
                                               true, worker_i);
-    void** buf = nd->buf;
-    size_t index = nd->index;
-    delete nd;
     if (b) {
       deallocate_buffer(buf);
       return true;  // In normal case, go on to next buffer.
     } else {
-      enqueue_complete_buffer(buf, index, true);
+      enqueue_complete_buffer(buf, index);
       return false;
     }
   } else {
@@ -222,40 +201,36 @@
 
 bool DirtyCardQueueSet::apply_closure_to_completed_buffer(int worker_i,
                                                           int stop_at,
-                                                          bool with_CAS)
+                                                          bool during_pause)
 {
-  CompletedBufferNode* nd = NULL;
-  if (with_CAS) {
-    guarantee(stop_at == 0, "Precondition");
-    nd = get_completed_buffer_CAS();
-  } else {
-    nd = get_completed_buffer_lock(stop_at);
-  }
+  assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
+  BufferNode* nd = get_completed_buffer(stop_at);
   bool res = apply_closure_to_completed_buffer_helper(worker_i, nd);
   if (res) Atomic::inc(&_processed_buffers_rs_thread);
   return res;
 }
 
 void DirtyCardQueueSet::apply_closure_to_all_completed_buffers() {
-  CompletedBufferNode* nd = _completed_buffers_head;
+  BufferNode* nd = _completed_buffers_head;
   while (nd != NULL) {
     bool b =
-      DirtyCardQueue::apply_closure_to_buffer(_closure, nd->buf, 0, _sz,
-                                              false);
+      DirtyCardQueue::apply_closure_to_buffer(_closure,
+                                              BufferNode::make_buffer_from_node(nd),
+                                              0, _sz, false);
     guarantee(b, "Should not stop early.");
-    nd = nd->next;
+    nd = nd->next();
   }
 }
 
 void DirtyCardQueueSet::abandon_logs() {
   assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
-  CompletedBufferNode* buffers_to_delete = NULL;
+  BufferNode* buffers_to_delete = NULL;
   {
     MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
     while (_completed_buffers_head != NULL) {
-      CompletedBufferNode* nd = _completed_buffers_head;
-      _completed_buffers_head = nd->next;
-      nd->next = buffers_to_delete;
+      BufferNode* nd = _completed_buffers_head;
+      _completed_buffers_head = nd->next();
+      nd->set_next(buffers_to_delete);
       buffers_to_delete = nd;
     }
     _n_completed_buffers = 0;
@@ -263,10 +238,9 @@
     debug_only(assert_completed_buffer_list_len_correct_locked());
   }
   while (buffers_to_delete != NULL) {
-    CompletedBufferNode* nd = buffers_to_delete;
-    buffers_to_delete = nd->next;
-    deallocate_buffer(nd->buf);
-    delete nd;
+    BufferNode* nd = buffers_to_delete;
+    buffers_to_delete = nd->next();
+    deallocate_buffer(BufferNode::make_buffer_from_node(nd));
   }
   // Since abandon is done only at safepoints, we can safely manipulate
   // these queues.
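
CompletedBufferNode, which was new'ed and delete'd for every completed buffer, is replaced here by BufferNode, and the calls to BufferNode::make_buffer_from_node() suggest the node header now shares the buffer's own allocation. The actual layout lives in ptrQueue.hpp, which this hunk does not show, so the sketch below is purely illustrative of that embedded-header idea.

#include <cstddef>
#include <cstdlib>
#include <new>

class BufferNodeSketch {
  BufferNodeSketch* _next;
  size_t _index;
public:
  BufferNodeSketch() : _next(NULL), _index(0) {}
  BufferNodeSketch* next() const { return _next; }
  void set_next(BufferNodeSketch* n) { _next = n; }
  size_t index() const { return _index; }

  // Assumed layout: payload slots start right after the header, so the two
  // views convert with pointer arithmetic and nothing is allocated per
  // enqueue/dequeue.
  static void** make_buffer_from_node(BufferNodeSketch* node) {
    return reinterpret_cast<void**>(node + 1);
  }
  static BufferNodeSketch* make_node_from_buffer(void** buf) {
    return reinterpret_cast<BufferNodeSketch*>(buf) - 1;
  }
  // Allocate header + 'slots' pointer slots in one block.
  static BufferNodeSketch* allocate(size_t slots) {
    void* mem = std::malloc(sizeof(BufferNodeSketch) + slots * sizeof(void*));
    return new (mem) BufferNodeSketch();
  }
  static void deallocate(BufferNodeSketch* node) { std::free(node); }
};

int main() {
  BufferNodeSketch* nd = BufferNodeSketch::allocate(256);
  void** buf = BufferNodeSketch::make_buffer_from_node(nd);
  buf[0] = buf;   // use a slot
  BufferNodeSketch::deallocate(BufferNodeSketch::make_node_from_buffer(buf));
  return 0;
}
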
--- a/src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -84,11 +84,12 @@
   jint _processed_buffers_rs_thread;
 
 public:
-  DirtyCardQueueSet();
+  DirtyCardQueueSet(bool notify_when_complete = true);
 
   void initialize(Monitor* cbl_mon, Mutex* fl_lock,
-                  int max_completed_queue = 0,
-                  Mutex* lock = NULL, PtrQueueSet* fl_owner = NULL);
+                  int process_completed_threshold,
+                  int max_completed_queue,
+                  Mutex* lock, PtrQueueSet* fl_owner = NULL);
 
   // The number of parallel ids that can be claimed to allow collector or
   // mutator threads to do card-processing work.
@@ -120,12 +121,13 @@
   // is returned to the completed buffer set, and this call returns false.
   bool apply_closure_to_completed_buffer(int worker_i = 0,
                                          int stop_at = 0,
-                                         bool with_CAS = false);
+                                         bool during_pause = false);
+
   bool apply_closure_to_completed_buffer_helper(int worker_i,
-                                                CompletedBufferNode* nd);
+                                                BufferNode* nd);
 
-  CompletedBufferNode* get_completed_buffer_CAS();
-  CompletedBufferNode* get_completed_buffer_lock(int stop_at);
+  BufferNode* get_completed_buffer(int stop_at);
+
   // Applies the current closure to all completed buffers,
   // non-consumptively.
   void apply_closure_to_all_completed_buffers();
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -928,6 +928,8 @@
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
     TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty);
 
+    TraceMemoryManagerStats tms(true /* fullGC */);
+
     double start = os::elapsedTime();
     g1_policy()->record_full_collection_start();
 
@@ -1001,6 +1003,8 @@
 
     COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 
+    MemoryService::track_memory_usage();
+
     if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
       HandleMark hm;  // Discard invalid handles created during verification
       gclog_or_tty->print(" VerifyAfterGC:");
@@ -1371,6 +1375,7 @@
 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
   SharedHeap(policy_),
   _g1_policy(policy_),
+  _dirty_card_queue_set(false),
   _ref_processor(NULL),
   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
   _bot_shared(NULL),
@@ -1436,6 +1441,7 @@
 }
 
 jint G1CollectedHeap::initialize() {
+  CollectedHeap::pre_initialize();
   os::enable_vtime();
 
   // Necessary to satisfy locking discipline assertions.
@@ -1456,8 +1462,6 @@
   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
 
-  // We allocate this in any case, but only do no work if the command line
-  // param is off.
   _cg1r = new ConcurrentG1Refine();
 
   // Reserve the maximum.
@@ -1590,18 +1594,20 @@
 
   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                                SATB_Q_FL_lock,
-                                               0,
+                                               G1SATBProcessCompletedThreshold,
                                                Shared_SATB_Q_lock);
 
   JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
                                                 DirtyCardQ_FL_lock,
-                                                G1UpdateBufferQueueMaxLength,
+                                                concurrent_g1_refine()->yellow_zone(),
+                                                concurrent_g1_refine()->red_zone(),
                                                 Shared_DirtyCardQ_lock);
 
   if (G1DeferredRSUpdate) {
     dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
                                       DirtyCardQ_FL_lock,
-                                      0,
+                                      -1, // never trigger processing
+                                      -1, // no limit on length
                                       Shared_DirtyCardQ_lock,
                                       &JavaThread::dirty_card_queue_set());
   }
@@ -1732,13 +1738,6 @@
   return car->free();
 }
 
-void G1CollectedHeap::collect(GCCause::Cause cause) {
-  // The caller doesn't have the Heap_lock
-  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
-  MutexLocker ml(Heap_lock);
-  collect_locked(cause);
-}
-
 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
   assert(Thread::current()->is_VM_thread(), "Precondition#1");
   assert(Heap_lock->is_locked(), "Precondition#2");
@@ -1755,17 +1754,31 @@
   }
 }
 
-
-void G1CollectedHeap::collect_locked(GCCause::Cause cause) {
-  // Don't want to do a GC until cleanup is completed.
-  wait_for_cleanup_complete();
-
-  // Read the GC count while holding the Heap_lock
-  int gc_count_before = SharedHeap::heap()->total_collections();
+void G1CollectedHeap::collect(GCCause::Cause cause) {
+  // The caller doesn't have the Heap_lock
+  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
+
+  int gc_count_before;
   {
-    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
-    VM_G1CollectFull op(gc_count_before, cause);
-    VMThread::execute(&op);
+    MutexLocker ml(Heap_lock);
+    // Read the GC count while holding the Heap_lock
+    gc_count_before = SharedHeap::heap()->total_collections();
+
+    // Don't want to do a GC until cleanup is completed.
+    wait_for_cleanup_complete();
+  } // We give up heap lock; VMThread::execute gets it back below
+  switch (cause) {
+    case GCCause::_scavenge_alot: {
+      // Do an incremental pause, which might sometimes be abandoned.
+      VM_G1IncCollectionPause op(gc_count_before, cause);
+      VMThread::execute(&op);
+      break;
+    }
+    default: {
+      // In all other cases, we currently do a full gc.
+      VM_G1CollectFull op(gc_count_before, cause);
+      VMThread::execute(&op);
+    }
   }
 }
 
@@ -2119,7 +2132,7 @@
 }
 
 size_t G1CollectedHeap::max_capacity() const {
-  return _g1_committed.byte_size();
+  return g1_reserved_obj_bytes();
 }
 
 jlong G1CollectedHeap::millis_since_last_gc() {
@@ -2492,6 +2505,7 @@
 }
 
 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
+  // always_do_update_barrier = false;
   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
   // Call allocation profiler
   AllocationProfiler::iterate_since_last_gc();
@@ -2505,6 +2519,7 @@
   // is set.
   COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
                         "derived pointer present"));
+  // always_do_update_barrier = true;
 }
 
 void G1CollectedHeap::do_collection_pause() {
@@ -2631,6 +2646,13 @@
 
 // </NEW PREDICTION>
 
+struct PrepareForRSScanningClosure : public HeapRegionClosure {
+  bool doHeapRegion(HeapRegion *r) {
+    r->rem_set()->set_iter_claimed(0);
+    return false;
+  }
+};
+
 void
 G1CollectedHeap::do_collection_pause_at_safepoint() {
   if (PrintHeapAtGC) {
@@ -2638,6 +2660,8 @@
   }
 
   {
+    ResourceMark rm;
+
     char verbose_str[128];
     sprintf(verbose_str, "GC pause ");
     if (g1_policy()->in_young_gc_mode()) {
@@ -2649,8 +2673,6 @@
     if (g1_policy()->should_initiate_conc_mark())
       strcat(verbose_str, " (initial-mark)");
 
-    GCCauseSetter x(this, GCCause::_g1_inc_collection_pause);
-
     // if PrintGCDetails is on, we'll print long statistics information
     // in the collector policy code, so let's not print this as the output
     // is messy if we do.
@@ -2658,7 +2680,8 @@
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
     TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);
 
-    ResourceMark rm;
+    TraceMemoryManagerStats tms(false /* fullGC */);
+
     assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
     assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
     guarantee(!is_gc_active(), "collection is not reentrant");
@@ -2768,6 +2791,8 @@
         gclog_or_tty->print_cr("\nAfter pause, heap:");
         print();
 #endif
+        PrepareForRSScanningClosure prepare_for_rs_scan;
+        collection_set_iterate(&prepare_for_rs_scan);
 
         setup_surviving_young_words();
 
@@ -2802,6 +2827,22 @@
           _young_list->reset_auxilary_lists();
         }
       } else {
+        if (_in_cset_fast_test != NULL) {
+          assert(_in_cset_fast_test_base != NULL, "Since _in_cset_fast_test isn't");
+          FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
+          // this is more for peace of mind; we're nulling them here and
+          // we're expecting them to be null at the beginning of the next GC
+          _in_cset_fast_test = NULL;
+          _in_cset_fast_test_base = NULL;
+        }
+        // This looks confusing, because the DPT should really be empty
+        // at this point -- since we have not done any collection work,
+        // there should not be any derived pointers in the table to update;
+        // however, there is some additional state in the DPT which is
+        // reset at the end of the (null) "gc" here via the following call.
+        // A better approach might be to split off that state resetting work
+        // into a separate method that asserts that the DPT is empty and call
+        // that here. That is deferred for now.
         COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
       }
 
@@ -2838,6 +2879,8 @@
 
       assert(regions_accounted_for(), "Region leakage.");
 
+      MemoryService::track_memory_usage();
+
       if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
         HandleMark hm;  // Discard invalid handles created during verification
         gclog_or_tty->print(" VerifyAfterGC:");
@@ -3747,22 +3790,16 @@
   return obj;
 }
 
-template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee, bool skip_cset_test>
+template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee>
 template <class T>
-void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee, skip_cset_test>
+void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee>
 ::do_oop_work(T* p) {
   oop obj = oopDesc::load_decode_heap_oop(p);
   assert(barrier != G1BarrierRS || obj != NULL,
          "Precondition: G1BarrierRS implies obj is nonNull");
 
-  // The only time we skip the cset test is when we're scanning
-  // references popped from the queue. And we only push on the queue
-  // references that we know point into the cset, so no point in
-  // checking again. But we'll leave an assert here for peace of mind.
-  assert(!skip_cset_test || _g1->obj_in_cs(obj), "invariant");
-
   // here the null check is implicit in the cset_fast_test() test
-  if (skip_cset_test || _g1->in_cset_fast_test(obj)) {
+  if (_g1->in_cset_fast_test(obj)) {
 #if G1_REM_SET_LOGGING
     gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" "
                            "into CS.", p, (void*) obj);
@@ -3779,7 +3816,6 @@
     }
   }
 
-  // When scanning moved objs, must look at all oops.
   if (barrier == G1BarrierEvac && obj != NULL) {
     _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
   }
@@ -3789,8 +3825,8 @@
   }
 }
 
-template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(oop* p);
-template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(narrowOop* p);
+template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p);
+template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p);
 
 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
   assert(has_partial_array_mask(p), "invariant");
@@ -3862,11 +3898,11 @@
           assert(UseCompressedOops, "Error");
           narrowOop* p = (narrowOop*) stolen_task;
           assert(has_partial_array_mask(p) ||
-                 _g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "Error");
+                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "Error");
           pss->push_on_queue(p);
         } else {
           oop* p = (oop*) stolen_task;
-          assert(has_partial_array_mask(p) || _g1h->obj_in_cs(*p), "Error");
+          assert(has_partial_array_mask(p) || _g1h->is_in_g1_reserved(*p), "Error");
           pss->push_on_queue(p);
         }
         continue;
@@ -3928,6 +3964,7 @@
     G1ParScanExtRootClosure         only_scan_root_cl(_g1h, &pss);
     G1ParScanPermClosure            only_scan_perm_cl(_g1h, &pss);
     G1ParScanHeapRSClosure          only_scan_heap_rs_cl(_g1h, &pss);
+    G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
 
     G1ParScanAndMarkExtRootClosure  scan_mark_root_cl(_g1h, &pss);
     G1ParScanAndMarkPermClosure     scan_mark_perm_cl(_g1h, &pss);
@@ -3951,7 +3988,7 @@
     _g1h->g1_process_strong_roots(/* not collecting perm */ false,
                                   SharedHeap::SO_AllClasses,
                                   scan_root_cl,
-                                  &only_scan_heap_rs_cl,
+                                  &push_heap_rs_cl,
                                   scan_so_cl,
                                   scan_perm_cl,
                                   i);
@@ -4209,10 +4246,11 @@
     RedirtyLoggedCardTableEntryFastClosure redirty;
     dirty_card_queue_set().set_closure(&redirty);
     dirty_card_queue_set().apply_closure_to_all_completed_buffers();
-    JavaThread::dirty_card_queue_set().merge_bufferlists(&dirty_card_queue_set());
+
+    DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
+    dcq.merge_bufferlists(&dirty_card_queue_set());
     assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
   }
-
   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 }
 
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -692,7 +692,7 @@
 
   // Reserved (g1 only; super method includes perm), capacity and the used
   // portion in bytes.
-  size_t g1_reserved_obj_bytes() { return _g1_reserved.byte_size(); }
+  size_t g1_reserved_obj_bytes() const { return _g1_reserved.byte_size(); }
   virtual size_t capacity() const;
   virtual size_t used() const;
   // This should be called when we're not holding the heap lock. The
@@ -1004,6 +1004,15 @@
   // storage in the heap comes from a young region or not.
   // See ReduceInitialCardMarks.
   virtual bool can_elide_tlab_store_barriers() const {
+    // 6920090: Temporarily disabled, because of lingering
+    // instabilities related to RICM with G1. In the
+    // interim, the option ReduceInitialCardMarksForG1
+    // below is left solely as a debugging device at least
+    // until 6920109 fixes the instabilities.
+    return ReduceInitialCardMarksForG1;
+  }
+
+  virtual bool card_mark_must_follow_store() const {
     return true;
   }
 
@@ -1022,6 +1031,8 @@
   // However, non-generational G1 (-XX:-G1Gen) appears to have
   // bit-rotted so was not tested below.
   virtual bool can_elide_initializing_store_barrier(oop new_obj) {
+    // Re 6920090, 6920109 above.
+    assert(ReduceInitialCardMarksForG1, "Else cannot be here");
     assert(G1Gen || !is_in_young(new_obj),
            "Non-generational G1 should never return true below");
     return is_in_young(new_obj);
@@ -1612,7 +1623,7 @@
   template <class T> void push_on_queue(T* ref) {
     assert(ref != NULL, "invariant");
     assert(has_partial_array_mask(ref) ||
-           _g1h->obj_in_cs(oopDesc::load_decode_heap_oop(ref)), "invariant");
+           _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(ref)), "invariant");
 #ifdef ASSERT
     if (has_partial_array_mask(ref)) {
       oop p = clear_partial_array_mask(ref);
@@ -1633,9 +1644,9 @@
       assert((oop*)ref != NULL, "pop_local() returned true");
       assert(UseCompressedOops || !ref.is_narrow(), "Error");
       assert(has_partial_array_mask((oop*)ref) ||
-             _g1h->obj_in_cs(ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)ref)
-                                             : oopDesc::load_decode_heap_oop((oop*)ref)),
-             "invariant");
+             _g1h->is_in_g1_reserved(ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)ref)
+                                                     : oopDesc::load_decode_heap_oop((oop*)ref)),
+              "invariant");
       IF_G1_DETAILED_STATS(note_pop());
     } else {
       StarTask null_task;
@@ -1648,9 +1659,9 @@
     assert((oop*)new_ref != NULL, "pop() from a local non-empty stack");
     assert(UseCompressedOops || !new_ref.is_narrow(), "Error");
     assert(has_partial_array_mask((oop*)new_ref) ||
-           _g1h->obj_in_cs(new_ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)new_ref)
-                                               : oopDesc::load_decode_heap_oop((oop*)new_ref)),
-             "invariant");
+           _g1h->is_in_g1_reserved(new_ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)new_ref)
+                                                       : oopDesc::load_decode_heap_oop((oop*)new_ref)),
+           "invariant");
     ref = new_ref;
   }
 
@@ -1814,12 +1825,12 @@
           assert(UseCompressedOops, "Error");
           narrowOop* p = (narrowOop*)ref_to_scan;
           assert(!has_partial_array_mask(p) &&
-                 _g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "sanity");
+                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
           deal_with_reference(p);
         } else {
           oop* p = (oop*)ref_to_scan;
-          assert((has_partial_array_mask(p) && _g1h->obj_in_cs(clear_partial_array_mask(p))) ||
-                 _g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "sanity");
+          assert((has_partial_array_mask(p) && _g1h->is_in_g1_reserved(clear_partial_array_mask(p))) ||
+                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
           deal_with_reference(p);
         }
       }
@@ -1833,12 +1844,12 @@
             assert(UseCompressedOops, "Error");
             narrowOop* p = (narrowOop*)ref_to_scan;
             assert(!has_partial_array_mask(p) &&
-                   _g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "sanity");
+                    _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
             deal_with_reference(p);
           } else {
             oop* p = (oop*)ref_to_scan;
             assert((has_partial_array_mask(p) && _g1h->obj_in_cs(clear_partial_array_mask(p))) ||
-                  _g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "sanity");
+                   _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
             deal_with_reference(p);
           }
         }
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -205,6 +205,7 @@
   // policy is created before the heap, we have to set this up here,
   // so it's done as soon as possible.
   HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
+  HeapRegionRemSet::setup_remset_size();
 
   _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
   _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
@@ -1516,8 +1517,30 @@
       (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
     update_recent_gc_times(end_time_sec, elapsed_ms);
     _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
-    // using 1.01 to account for floating point inaccuracies
-    assert(recent_avg_pause_time_ratio() < 1.01, "All GC?");
+    if (recent_avg_pause_time_ratio() < 0.0 ||
+        (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
+#ifndef PRODUCT
+      // Dump info to allow post-facto debugging
+      gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
+      gclog_or_tty->print_cr("-------------------------------------------");
+      gclog_or_tty->print_cr("Recent GC Times (ms):");
+      _recent_gc_times_ms->dump();
+      gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
+      _recent_prev_end_times_for_all_gcs_sec->dump();
+      gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
+                             _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
+      // In debug mode, terminate the JVM if the user wants to debug at this point.
+      assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
+#endif  // !PRODUCT
+      // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
+      // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
+      if (_recent_avg_pause_time_ratio < 0.0) {
+        _recent_avg_pause_time_ratio = 0.0;
+      } else {
+        assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
+        _recent_avg_pause_time_ratio = 1.0;
+      }
+    }
   }
 
   if (G1PolicyVerbose > 1) {
@@ -1892,6 +1915,10 @@
   calculate_young_list_min_length();
   calculate_young_list_target_config();
 
+  // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
+  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSUpdatePauseFractionPercent / 100.0;
+  adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
+
   // </NEW PREDICTION>
 
   _target_pause_time_ms = -1.0;
@@ -1899,6 +1926,47 @@
 
 // <NEW PREDICTION>
 
+void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
+                                                     double update_rs_processed_buffers,
+                                                     double goal_ms) {
+  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+  ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
+
+  if (G1AdaptiveConcRefine) {
+    const int k_gy = 3, k_gr = 6;
+    const double inc_k = 1.1, dec_k = 0.9;
+
+    int g = cg1r->green_zone();
+    if (update_rs_time > goal_ms) {
+      g = (int)(g * dec_k);  // Can become 0; that's OK and means mutator-only processing.
+    } else {
+      if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
+        g = (int)MAX2(g * inc_k, g + 1.0);
+      }
+    }
+    // Change the refinement threads params
+    cg1r->set_green_zone(g);
+    cg1r->set_yellow_zone(g * k_gy);
+    cg1r->set_red_zone(g * k_gr);
+    cg1r->reinitialize_threads();
+
+    int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
+    int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
+                                    cg1r->yellow_zone());
+    // Change the barrier params
+    dcqs.set_process_completed_threshold(processing_threshold);
+    dcqs.set_max_completed_queue(cg1r->red_zone());
+  }
+
+  int curr_queue_size = dcqs.completed_buffers_num();
+  if (curr_queue_size >= cg1r->yellow_zone()) {
+    dcqs.set_completed_queue_padding(curr_queue_size);
+  } else {
+    dcqs.set_completed_queue_padding(0);
+  }
+  dcqs.notify_if_necessary();
+}
+
 double
 G1CollectorPolicy::
 predict_young_collection_elapsed_time_ms(size_t adjustment) {
@@ -2825,8 +2893,15 @@
   double non_young_start_time_sec;
   start_recording_regions();
 
-  guarantee(_target_pause_time_ms > -1.0,
+  guarantee(_target_pause_time_ms > -1.0
+            NOT_PRODUCT(|| Universe::heap()->gc_cause() == GCCause::_scavenge_alot),
             "_target_pause_time_ms should have been set!");
+#ifndef PRODUCT
+  if (_target_pause_time_ms <= -1.0) {
+    assert(ScavengeALot && Universe::heap()->gc_cause() == GCCause::_scavenge_alot, "Error");
+    _target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
+  }
+#endif
   assert(_collection_set == NULL, "Precondition");
 
   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
@@ -2972,7 +3047,3 @@
   G1CollectorPolicy::record_collection_pause_end(abandoned);
   assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
 }
-
-// Local Variables: ***
-// c-indentation-style: gnu ***
-// End: ***
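
adjust_concurrent_refinement() above grows or shrinks the green zone multiplicatively, derives the yellow and red zones from it with the fixed factors k_gy = 3 and k_gr = 6, and pads the completed queue while it is still above yellow after a pause. The following standalone restatement of just the zone-resizing rule keeps the patch's constants and branch structure but invents the struct and driver, which makes it easy to experiment with the constants.

#include <algorithm>
#include <cstdio>

struct Zones { int green, yellow, red; };

Zones adjust(Zones z, double update_rs_time_ms, double buffers, double goal_ms) {
  const int k_gy = 3, k_gr = 6;            // constants from the patch
  const double inc_k = 1.1, dec_k = 0.9;
  int g = z.green;
  if (update_rs_time_ms > goal_ms) {
    g = (int)(g * dec_k);                  // over goal: shrink the green zone
  } else if (update_rs_time_ms < goal_ms && buffers > g) {
    g = (int)std::max(g * inc_k, g + 1.0); // under goal: grow by at least one
  }
  return Zones{ g, g * k_gy, g * k_gr };   // yellow and red follow green
}

int main() {
  Zones z{ 16, 48, 96 };
  z = adjust(z, 12.0 /*ms*/, 40.0 /*buffers*/, 10.0 /*goal ms*/);
  std::printf("green=%d yellow=%d red=%d\n", z.green, z.yellow, z.red);
  return 0;
}
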
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -316,6 +316,10 @@
   bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
 #endif // PRODUCT
 
+  void adjust_concurrent_refinement(double update_rs_time,
+                                    double update_rs_processed_buffers,
+                                    double goal_ms);
+
 protected:
   double _pause_time_target_ms;
   double _recorded_young_cset_choice_time_ms;
--- a/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -86,12 +86,22 @@
     //   increase the array size (:-)
     //   remove the oldest entry (this might allow more GC time for
     //     the time slice than what's allowed)
-    //   concolidate the two entries with the minimum gap between them
-    //     (this mighte allow less GC time than what's allowed)
-    guarantee(0, "array full, currently we can't recover");
+    //   consolidate the two entries with the minimum gap between them
+    //     (this might allow less GC time than what's allowed)
+    guarantee(NOT_PRODUCT(ScavengeALot ||) G1ForgetfulMMUTracker,
+              "array full, currently we can't recover unless +G1ForgetfulMMUTracker");
+    // In the case where ScavengeALot is true, such overflow is not
+    // uncommon; in such cases, we can, without much loss of precision
+    // or performance (we are GC'ing most of the time anyway!),
+    // simply overwrite the oldest entry in the tracker: this
+    // is also the behaviour when G1ForgetfulMMUTracker is enabled.
+    _head_index = trim_index(_head_index + 1);
+    assert(_head_index == _tail_index, "Because we have a full circular buffer");
+    _tail_index = trim_index(_tail_index + 1);
+  } else {
+    _head_index = trim_index(_head_index + 1);
+    ++_no_entries;
   }
-  _head_index = trim_index(_head_index + 1);
-  ++_no_entries;
   _array[_head_index] = G1MMUTrackerQueueElem(start, end);
 }
 
--- a/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -99,7 +99,10 @@
   // The array is of fixed size and I don't think we'll need more than
   // two or three entries with the current behaviour of G1 pauses.
   // If the array is full, an easy fix is to look for the pauses with
-  // the shortest gap between them and concolidate them.
+  // the shortest gap between them and consolidate them.
+  // For now, we have taken the expedient alternative of forgetting
+  // the oldest entry when +G1ForgetfulMMUTracker is set, thus
+  // potentially violating MMU specs for some time thereafter.
 
   G1MMUTrackerQueueElem _array[QueueLength];
   int                   _head_index;
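
The forgetful policy described above handles overflow by advancing both indices of the full circular buffer, so the newest pause entry overwrites the oldest one. A toy tracker showing only that overflow behaviour (names, sizes, and the driver are illustrative, not HotSpot's):

#include <cstdio>

const int QueueLength = 4;

struct Tracker {
  double array_[QueueLength];
  int head = -1, tail = 0, entries = 0;
  static int trim(int i) { return (i + QueueLength) % QueueLength; }

  void add(double v) {
    if (entries == QueueLength) {
      head = trim(head + 1);   // step onto the oldest entry...
      tail = trim(tail + 1);   // ...and forget it
    } else {
      head = trim(head + 1);
      ++entries;
    }
    array_[head] = v;          // newest entry always lands at head
  }
};

int main() {
  Tracker t;
  for (int i = 1; i <= 6; ++i) t.add(i);   // 5 and 6 evict 1 and 2
  for (int i = 0; i < t.entries; ++i)
    std::printf("%.0f ", t.array_[Tracker::trim(t.tail + i)]);  // prints 3 4 5 6
  std::printf("\n");
  return 0;
}
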
--- a/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -53,6 +53,15 @@
   bool apply_to_weak_ref_discovered_field() { return true; }
 };
 
+class G1ParPushHeapRSClosure : public G1ParClosureSuper {
+public:
+  G1ParPushHeapRSClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
+    G1ParClosureSuper(g1, par_scan_state) { }
+  template <class T> void do_oop_nv(T* p);
+  virtual void do_oop(oop* p)          { do_oop_nv(p); }
+  virtual void do_oop(narrowOop* p)    { do_oop_nv(p); }
+};
+
 class G1ParScanClosure : public G1ParClosureSuper {
 public:
   G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
@@ -100,7 +109,7 @@
 };
 
 template<bool do_gen_barrier, G1Barrier barrier,
-         bool do_mark_forwardee, bool skip_cset_test>
+         bool do_mark_forwardee>
 class G1ParCopyClosure : public G1ParCopyHelper {
   G1ParScanClosure _scanner;
   template <class T> void do_oop_work(T* p);
@@ -116,12 +125,13 @@
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
 };
 
-typedef G1ParCopyClosure<false, G1BarrierNone, false, false> G1ParScanExtRootClosure;
-typedef G1ParCopyClosure<true,  G1BarrierNone, false, false> G1ParScanPermClosure;
-typedef G1ParCopyClosure<false, G1BarrierRS,   false, false> G1ParScanHeapRSClosure;
-typedef G1ParCopyClosure<false, G1BarrierNone, true,  false> G1ParScanAndMarkExtRootClosure;
-typedef G1ParCopyClosure<true,  G1BarrierNone, true,  false> G1ParScanAndMarkPermClosure;
-typedef G1ParCopyClosure<false, G1BarrierRS,   true,  false> G1ParScanAndMarkHeapRSClosure;
+typedef G1ParCopyClosure<false, G1BarrierNone, false> G1ParScanExtRootClosure;
+typedef G1ParCopyClosure<true,  G1BarrierNone, false> G1ParScanPermClosure;
+typedef G1ParCopyClosure<false, G1BarrierRS,   false> G1ParScanHeapRSClosure;
+typedef G1ParCopyClosure<false, G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
+typedef G1ParCopyClosure<true,  G1BarrierNone, true> G1ParScanAndMarkPermClosure;
+typedef G1ParCopyClosure<false, G1BarrierRS,   true> G1ParScanAndMarkHeapRSClosure;
+
 // This is the only case when we set skip_cset_test. Basically, this
 // closure is (should?) only be called directly while we're draining
 // the overflow and task queues. In that case we know that the
@@ -132,7 +142,7 @@
 // We need a separate closure to handle references during evacuation
 // failure processing, as we cannot assume that the reference already
 // points into the collection set (like G1ParScanHeapEvacClosure does).
-typedef G1ParCopyClosure<false, G1BarrierEvac, false, false> G1ParScanHeapEvacFailureClosure;
+typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;
 
 class FilterIntoCSClosure: public OopClosure {
   G1CollectedHeap* _g1;
--- a/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -104,3 +104,16 @@
     }
   }
 }
+
+template <class T> inline void G1ParPushHeapRSClosure::do_oop_nv(T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (_g1->in_cset_fast_test(obj)) {
+      Prefetch::write(obj->mark_addr(), 0);
+      Prefetch::read(obj->mark_addr(), (HeapWordSize*2));
+      _par_scan_state->push_on_queue(p);
+    }
+  }
+}
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -155,8 +155,8 @@
   G1BlockOffsetSharedArray* _bot_shared;
   CardTableModRefBS *_ct_bs;
   int _worker_i;
+  int _block_size;
   bool _try_claimed;
-  size_t _min_skip_distance, _max_skip_distance;
 public:
   ScanRSClosure(OopsInHeapRegionClosure* oc, int worker_i) :
     _oc(oc),
@@ -168,8 +168,7 @@
     _g1h = G1CollectedHeap::heap();
     _bot_shared = _g1h->bot_shared();
     _ct_bs = (CardTableModRefBS*) (_g1h->barrier_set());
-    _min_skip_distance = 16;
-    _max_skip_distance = 2 * _g1h->n_par_threads() * _min_skip_distance;
+    _block_size = MAX2<int>(G1RSetScanBlockSize, 1);
   }
 
   void set_try_claimed() { _try_claimed = true; }
@@ -225,12 +224,15 @@
     HeapRegionRemSetIterator* iter = _g1h->rem_set_iterator(_worker_i);
     hrrs->init_iterator(iter);
     size_t card_index;
-    size_t skip_distance = 0, current_card = 0, jump_to_card = 0;
-    while (iter->has_next(card_index)) {
-      if (current_card < jump_to_card) {
-        ++current_card;
-        continue;
+
+    // We claim cards in blocks to reduce contention. The block size is determined by
+    // the G1RSetScanBlockSize parameter.
+    size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
+    for (size_t current_card = 0; iter->has_next(card_index); current_card++) {
+      if (current_card >= jump_to_card + _block_size) {
+        jump_to_card = hrrs->iter_claimed_next(_block_size);
       }
+      if (current_card < jump_to_card) continue;
       HeapWord* card_start = _g1h->bot_shared()->address_for_index(card_index);
 #if 0
       gclog_or_tty->print("Rem set iteration yielded card [" PTR_FORMAT ", " PTR_FORMAT ").\n",
@@ -247,22 +249,14 @@
 
        // If the card is dirty, then we will scan it during updateRS.
       if (!card_region->in_collection_set() && !_ct_bs->is_card_dirty(card_index)) {
-          if (!_ct_bs->is_card_claimed(card_index) && _ct_bs->claim_card(card_index)) {
-            scanCard(card_index, card_region);
-          } else if (_try_claimed) {
-            if (jump_to_card == 0 || jump_to_card != current_card) {
-              // We did some useful work in the previous iteration.
-              // Decrease the distance.
-              skip_distance = MAX2(skip_distance >> 1, _min_skip_distance);
-            } else {
-              // Previous iteration resulted in a claim failure.
-              // Increase the distance.
-              skip_distance = MIN2(skip_distance << 1, _max_skip_distance);
-            }
-            jump_to_card = current_card + skip_distance;
-          }
+        // We mark the card "claimed" lazily (so races are possible but benign),
+        // which reduces the number of duplicate scans (the rsets of the regions in the cset
+        // can intersect).
+        if (!_ct_bs->is_card_claimed(card_index)) {
+          _ct_bs->set_card_claimed(card_index);
+          scanCard(card_index, card_region);
+        }
       }
-      ++current_card;
     }
     if (!_try_claimed) {
       hrrs->set_iter_complete();
@@ -299,30 +293,18 @@
   double rs_time_start = os::elapsedTime();
   HeapRegion *startRegion = calculateStartRegion(worker_i);
 
-  BufferingOopsInHeapRegionClosure boc(oc);
-  ScanRSClosure scanRScl(&boc, worker_i);
+  ScanRSClosure scanRScl(oc, worker_i);
   _g1->collection_set_iterate_from(startRegion, &scanRScl);
   scanRScl.set_try_claimed();
   _g1->collection_set_iterate_from(startRegion, &scanRScl);
 
-  boc.done();
-  double closure_app_time_sec = boc.closure_app_seconds();
-  double scan_rs_time_sec = (os::elapsedTime() - rs_time_start) -
-    closure_app_time_sec;
-  double closure_app_time_ms = closure_app_time_sec * 1000.0;
+  double scan_rs_time_sec = os::elapsedTime() - rs_time_start;
 
   assert( _cards_scanned != NULL, "invariant" );
   _cards_scanned[worker_i] = scanRScl.cards_done();
 
   _g1p->record_scan_rs_start_time(worker_i, rs_time_start * 1000.0);
   _g1p->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);
-
-  double scan_new_refs_time_ms = _g1p->get_scan_new_refs_time(worker_i);
-  if (scan_new_refs_time_ms > 0.0) {
-    closure_app_time_ms += scan_new_refs_time_ms;
-  }
-
-  _g1p->record_obj_copy_time(worker_i, closure_app_time_ms);
 }
 
 void HRInto_G1RemSet::updateRS(int worker_i) {
@@ -449,9 +431,8 @@
       oc->do_oop(p);
     }
   }
-  _g1p->record_scan_new_refs_time(worker_i,
-                                  (os::elapsedTime() - scan_new_refs_start_sec)
-                                  * 1000.0);
+  double scan_new_refs_time_ms = (os::elapsedTime() - scan_new_refs_start_sec) * 1000.0;
+  _g1p->record_scan_new_refs_time(worker_i, scan_new_refs_time_ms);
 }
 
 void HRInto_G1RemSet::cleanupHRRS() {
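
The rewritten ScanRSClosure claims cards in fixed-size blocks through hrrs->iter_claimed_next(), which this hunk does not define but which is presumably an atomic fetch-and-add over a shared claim counter (consistent with the set_iter_claimed(0) reset added to g1CollectedHeap.cpp above). A reduced, single-threaded model of the claiming loop, with that assumption made explicit:

#include <atomic>
#include <cstdio>

static std::atomic<size_t> iter_claimed{0};   // shared per-region claim counter

size_t iter_claimed_next(size_t step) {
  // Assumed semantics: advance the counter and return its previous value.
  return iter_claimed.fetch_add(step);
}

void scan_cards(int worker, size_t n_cards, size_t block_size) {
  size_t jump_to_card = iter_claimed_next(block_size);
  for (size_t current_card = 0; current_card < n_cards; current_card++) {
    if (current_card >= jump_to_card + block_size) {
      jump_to_card = iter_claimed_next(block_size);  // claim the next block
    }
    if (current_card < jump_to_card) continue;       // not ours; skip cheaply
    std::printf("worker %d scans card %zu\n", worker, current_card);
  }
}

int main() {
  scan_cards(0, 8, 4);   // single-threaded demo; workers would share the counter
  return 0;
}

Claiming G1RSetScanBlockSize cards per atomic operation amortizes the synchronization cost across a whole block, which is the contention reduction the comment in the closure refers to.
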
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -85,7 +85,7 @@
   diagnostic(bool, G1SummarizeZFStats, false,                               \
           "Summarize zero-filling info")                                    \
                                                                             \
-  develop(bool, G1TraceConcurrentRefinement, false,                         \
+  diagnostic(bool, G1TraceConcurrentRefinement, false,                      \
           "Trace G1 concurrent refinement")                                 \
                                                                             \
   product(intx, G1MarkStackSize, 2 * 1024 * 1024,                           \
@@ -94,19 +94,6 @@
   product(intx, G1MarkRegionStackSize, 1024 * 1024,                         \
           "Size of the region stack for concurrent marking.")               \
                                                                             \
-  develop(bool, G1ConcRefine, true,                                         \
-          "If true, run concurrent rem set refinement for G1")              \
-                                                                            \
-  develop(intx, G1ConcRefineTargTraversals, 4,                              \
-          "Number of concurrent refinement we try to achieve")              \
-                                                                            \
-  develop(intx, G1ConcRefineInitialDelta, 4,                                \
-          "Number of heap regions of alloc ahead of starting collection "   \
-          "pause to start concurrent refinement (initially)")               \
-                                                                            \
-  develop(bool, G1SmoothConcRefine, true,                                   \
-          "Attempts to smooth out the overhead of concurrent refinement")   \
-                                                                            \
   develop(bool, G1ConcZeroFill, true,                                       \
           "If true, run concurrent zero-filling thread")                    \
                                                                             \
@@ -178,13 +165,38 @@
   product(intx, G1UpdateBufferSize, 256,                                    \
           "Size of an update buffer")                                       \
                                                                             \
-  product(intx, G1UpdateBufferQueueProcessingThreshold, 5,                  \
+  product(intx, G1ConcRefineYellowZone, 0,                                  \
           "Number of enqueued update buffers that will "                    \
-          "trigger concurrent processing")                                  \
+          "trigger concurrent processing. Will be selected ergonomically "  \
+          "by default.")                                                    \
+                                                                            \
+  product(intx, G1ConcRefineRedZone, 0,                                     \
+          "Maximum number of enqueued update buffers before mutator "       \
+          "threads start processing new ones instead of enqueueing them. "  \
+          "Will be selected ergonomically by default. Zero will disable "   \
+          "concurrent processing.")                                         \
+                                                                            \
+  product(intx, G1ConcRefineGreenZone, 0,                                   \
+          "The number of update buffers that are left in the queue by the " \
+          "concurrent processing threads. Will be selected ergonomically "  \
+          "by default.")                                                    \
                                                                             \
-  product(intx, G1UpdateBufferQueueMaxLength, 30,                           \
-          "Maximum number of enqueued update buffers before mutator "       \
-          "threads start processing new ones instead of enqueueing them")   \
+  product(intx, G1ConcRefineServiceInterval, 300,                           \
+          "The last concurrent refinement thread wakes up every "           \
+          "specified number of milliseconds to do miscellaneous work.")     \
+                                                                            \
+  product(intx, G1ConcRefineThresholdStep, 0,                               \
+          "Each time the rset update queue increases by this amount "       \
+          "activate the next refinement thread if available. "              \
+          "Will be selected ergonomically by default.")                     \
+                                                                            \
+  product(intx, G1RSUpdatePauseFractionPercent, 10,                         \
+          "A target percentage of time that is allowed to be spend on "     \
+          "process RS update buffers during the collection pause.")         \
+                                                                            \
+  product(bool, G1AdaptiveConcRefine, true,                                 \
+          "Select green, yellow and red zones adaptively to meet the "      \
+          "the pause requirements.")                                        \
                                                                             \
   develop(intx, G1ConcRSLogCacheSize, 10,                                   \
           "Log base 2 of the length of conc RS hot-card cache.")            \
@@ -195,8 +207,20 @@
   develop(bool, G1PrintOopAppls, false,                                     \
           "When true, print applications of closures to external locs.")    \
                                                                             \
-  develop(intx, G1LogRSRegionEntries, 7,                                    \
-          "Log_2 of max number of regions for which we keep bitmaps.")      \
+  develop(intx, G1RSetRegionEntriesBase, 256,                               \
+          "Max number of regions in a fine-grain table per MB.")            \
+                                                                            \
+  product(intx, G1RSetRegionEntries, 0,                                     \
+          "Max number of regions for which we keep bitmaps."                \
+          "Will be set ergonomically by default")                           \
+                                                                            \
+  develop(intx, G1RSetSparseRegionEntriesBase, 4,                           \
+          "Max number of entries per region in a sparse table "             \
+          "per MB.")                                                        \
+                                                                            \
+  product(intx, G1RSetSparseRegionEntries, 0,                               \
+          "Max number of entries per region in a sparse table."             \
+          "Will be set ergonomically by default.")                          \
                                                                             \
   develop(bool, G1RecordHRRSOops, false,                                    \
           "When true, record recent calls to rem set operations.")          \
@@ -242,6 +266,10 @@
   product(bool, G1UseSurvivorSpaces, true,                                  \
           "When true, use survivor space.")                                 \
                                                                             \
+  develop(bool, G1FailOnFPError, false,                                     \
+          "When set, G1 will fail when it encounters an FP 'error', "       \
+          "so as to allow debugging")                                       \
+                                                                            \
   develop(bool, G1FixedTenuringThreshold, false,                            \
           "When set, G1 will not adjust the tenuring threshold")            \
                                                                             \
@@ -252,6 +280,9 @@
           "If non-0 is the size of the G1 survivor space, "                 \
           "otherwise SurvivorRatio is used to determine the size")          \
                                                                             \
+  product(bool, G1ForgetfulMMUTracker, false,                               \
+          "If the MMU tracker's memory is full, forget the oldest entry")   \
+                                                                            \
   product(uintx, G1HeapRegionSize, 0,                                       \
           "Size of the G1 regions.")                                        \
                                                                             \
@@ -272,6 +303,14 @@
           "a particular entry exceeds this value.")                         \
                                                                             \
   develop(bool, G1VerifyCTCleanup, false,                                   \
-          "Verify card table cleanup.")
+          "Verify card table cleanup.")                                     \
+                                                                            \
+  product(uintx, G1RSetScanBlockSize, 64,                                   \
+          "Size of a work unit of cards claimed by a worker thread"         \
+          "during RSet scanning.")                                          \
+                                                                            \
+  develop(bool, ReduceInitialCardMarksForG1, false,                         \
+          "When ReduceInitialCardMarks is true, this flag setting "         \
+          " controls whether G1 allows the RICM optimization")
 
 G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
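
The three G1ConcRefine* zone flags above replace the two fixed thresholds
of the old flags and work together, roughly as follows: once the
completed-buffer queue drops to the green zone, concurrent refinement
stops and the remaining buffers are left for the GC pause; from the
yellow zone upward, concurrent refinement threads are activated
(stepwise, per G1ConcRefineThresholdStep); from the red zone upward,
mutator threads process their own buffers instead of enqueueing them.
A minimal standalone sketch of such a policy in plain C++; the type and
function names are invented for illustration and are not HotSpot code:

    #include <cstddef>

    // Hypothetical model of the green/yellow/red thresholds.
    struct RefinementZones {
      std::size_t green;   // buffers left for the GC pause to process
      std::size_t yellow;  // start activating refinement threads here
      std::size_t red;     // mutators process their own buffers here
    };

    enum class Action { Leave, RefineConcurrently, MutatorProcesses };

    // Decide what to do when the completed-buffer queue has length n.
    Action select_action(const RefinementZones& z, std::size_t n) {
      if (n >= z.red)    return Action::MutatorProcesses;
      if (n >= z.yellow) return Action::RefineConcurrently;
      return Action::Leave;  // below yellow: defer to the GC pause
    }
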
--- a/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -33,11 +33,12 @@
 };
 
 template<bool do_gen_barrier, G1Barrier barrier,
-         bool do_mark_forwardee, bool skip_cset_test>
+         bool do_mark_forwardee>
 class G1ParCopyClosure;
 class G1ParScanClosure;
+class G1ParPushHeapRSClosure;
 
-typedef G1ParCopyClosure<false, G1BarrierEvac, false, true> G1ParScanHeapEvacClosure;
+typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;
 
 class FilterIntoCSClosure;
 class FilterOutOfRegionClosure;
@@ -51,6 +52,7 @@
 #define FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES(f) \
       f(G1ParScanHeapEvacClosure,_nv)                   \
       f(G1ParScanClosure,_nv)                           \
+      f(G1ParPushHeapRSClosure,_nv)                     \
       f(FilterIntoCSClosure,_nv)                        \
       f(FilterOutOfRegionClosure,_nv)                   \
       f(FilterInHeapRegionAndIntoCSClosure,_nv)         \
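
The FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES(f) macro above (to
which G1ParPushHeapRSClosure is being added) is an instance of the
"X-macro" idiom: the closure list is written once and expanded with a
different f at each use site. A self-contained sketch of the idiom,
with invented closure names:

    #include <cstdio>

    // One list, many expansions; f is supplied by each use site.
    #define ALL_CLOSURES(f) \
        f(ScanClosure)      \
        f(PushClosure)

    #define FORWARD_DECLARE(name) class name;
    ALL_CLOSURES(FORWARD_DECLARE)   // class ScanClosure; class PushClosure;
    #undef FORWARD_DECLARE

    #define PRINT_NAME(name) std::puts(#name);
    int main() {
      ALL_CLOSURES(PRINT_NAME)      // prints each registered closure name
    }
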
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -258,42 +258,6 @@
     ReserveParTableExpansion = 1
   };
 
-  void par_expand() {
-    int n = HeapRegionRemSet::num_par_rem_sets()-1;
-    if (n <= 0) return;
-    if (_par_tables == NULL) {
-      PerRegionTable* res =
-        (PerRegionTable*)
-        Atomic::cmpxchg_ptr((PerRegionTable*)ReserveParTableExpansion,
-                            &_par_tables, NULL);
-      if (res != NULL) return;
-      // Otherwise, we reserved the right to do the expansion.
-
-      PerRegionTable** ptables = NEW_C_HEAP_ARRAY(PerRegionTable*, n);
-      for (int i = 0; i < n; i++) {
-        PerRegionTable* ptable = PerRegionTable::alloc(hr());
-        ptables[i] = ptable;
-      }
-      // Here we do not need an atomic.
-      _par_tables = ptables;
-#if COUNT_PAR_EXPANDS
-      print_par_expand();
-#endif
-      // We must put this table on the expanded list.
-      PosParPRT* exp_head = _par_expanded_list;
-      while (true) {
-        set_next_par_expanded(exp_head);
-        PosParPRT* res =
-          (PosParPRT*)
-          Atomic::cmpxchg_ptr(this, &_par_expanded_list, exp_head);
-        if (res == exp_head) return;
-        // Otherwise.
-        exp_head = res;
-      }
-      ShouldNotReachHere();
-    }
-  }
-
   void par_contract() {
     assert(_par_tables != NULL, "Precondition.");
     int n = HeapRegionRemSet::num_par_rem_sets()-1;
@@ -391,13 +355,49 @@
   void set_next(PosParPRT* nxt) { _next = nxt; }
   PosParPRT** next_addr() { return &_next; }
 
+  bool should_expand(int tid) {
+    return par_tables() == NULL && tid > 0 && hr()->is_gc_alloc_region();
+  }
+
+  void par_expand() {
+    int n = HeapRegionRemSet::num_par_rem_sets()-1;
+    if (n <= 0) return;
+    if (_par_tables == NULL) {
+      PerRegionTable* res =
+        (PerRegionTable*)
+        Atomic::cmpxchg_ptr((PerRegionTable*)ReserveParTableExpansion,
+                            &_par_tables, NULL);
+      if (res != NULL) return;
+      // Otherwise, we reserved the right to do the expansion.
+
+      PerRegionTable** ptables = NEW_C_HEAP_ARRAY(PerRegionTable*, n);
+      for (int i = 0; i < n; i++) {
+        PerRegionTable* ptable = PerRegionTable::alloc(hr());
+        ptables[i] = ptable;
+      }
+      // Here we do not need an atomic.
+      _par_tables = ptables;
+#if COUNT_PAR_EXPANDS
+      print_par_expand();
+#endif
+      // We must put this table on the expanded list.
+      PosParPRT* exp_head = _par_expanded_list;
+      while (true) {
+        set_next_par_expanded(exp_head);
+        PosParPRT* res =
+          (PosParPRT*)
+          Atomic::cmpxchg_ptr(this, &_par_expanded_list, exp_head);
+        if (res == exp_head) return;
+        // Otherwise.
+        exp_head = res;
+      }
+      ShouldNotReachHere();
+    }
+  }
+
   void add_reference(OopOrNarrowOopStar from, int tid) {
     // Expansion, if necessary, has been done by the caller via
     // should_expand()/par_expand().
     PerRegionTable** pt = par_tables();
-    if (par_tables() == NULL && tid > 0 && hr()->is_gc_alloc_region()) {
-      par_expand();
-      pt = par_tables();
-    }
     if (pt != NULL) {
       // We always have to assume that mods to table 0 are in parallel,
       // because of the claiming scheme in parallel expansion.  A thread
@@ -505,12 +505,13 @@
   typedef PosParPRT* PosParPRTPtr;
   if (_max_fine_entries == 0) {
     assert(_mod_max_fine_entries_mask == 0, "Both or none.");
-    _max_fine_entries = (size_t)(1 << G1LogRSRegionEntries);
+    size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
+    _max_fine_entries = (size_t)(1 << max_entries_log);
     _mod_max_fine_entries_mask = _max_fine_entries - 1;
 #if SAMPLE_FOR_EVICTION
     assert(_fine_eviction_sample_size == 0
            && _fine_eviction_stride == 0, "All init at same time.");
-    _fine_eviction_sample_size = MAX2((size_t)4, (size_t)G1LogRSRegionEntries);
+    _fine_eviction_sample_size = MAX2((size_t)4, max_entries_log);
     _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
 #endif
   }
@@ -655,13 +656,6 @@
 #endif
       }
 
-      // Otherwise, transfer from sparse to fine-grain.
-      CardIdx_t cards[SparsePRTEntry::CardsPerEntry];
-      if (G1HRRSUseSparseTable) {
-        bool res = _sparse_table.get_cards(from_hrs_ind, &cards[0]);
-        assert(res, "There should have been an entry");
-      }
-
       if (_n_fine_entries == _max_fine_entries) {
         prt = delete_region_table();
       } else {
@@ -676,10 +670,12 @@
       _fine_grain_regions[ind] = prt;
       _n_fine_entries++;
 
-      // Add in the cards from the sparse table.
       if (G1HRRSUseSparseTable) {
-        for (int i = 0; i < SparsePRTEntry::CardsPerEntry; i++) {
-          CardIdx_t c = cards[i];
+        // Transfer from sparse to fine-grain.
+        SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrs_ind);
+        assert(sprt_entry != NULL, "There should have been an entry");
+        for (int i = 0; i < SparsePRTEntry::cards_num(); i++) {
+          CardIdx_t c = sprt_entry->card(i);
           if (c != SparsePRTEntry::NullEntry) {
             prt->add_card(c);
           }
@@ -696,7 +692,21 @@
   // OtherRegionsTable for why this is OK.
   assert(prt != NULL, "Inv");
 
-  prt->add_reference(from, tid);
+  if (prt->should_expand(tid)) {
+    MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
+    HeapRegion* prt_hr = prt->hr();
+    if (prt_hr == from_hr) {
+      // Make sure the table still corresponds to the same region
+      prt->par_expand();
+      prt->add_reference(from, tid);
+    }
+    // else: the table has been concurrently coarsened and evicted, and
+    // its data structure re-used for another table. We don't need to
+    // add the reference any more: the region has been coarsened and
+    // the whole region will be scanned anyway.
+  } else {
+    prt->add_reference(from, tid);
+  }
   if (G1RecordHRRSOops) {
     HeapRegionRemSet::record(hr(), from);
 #if HRRS_VERBOSE
@@ -1070,6 +1080,19 @@
 {}
 
 
+void HeapRegionRemSet::setup_remset_size() {
+  // Set up the sparse and fine-grain table sizes:
+  //   table_size = base * (log2(region_size / 1M) + 1)
+  int region_size_log_mb = MAX2((int)HeapRegion::LogOfHRGrainBytes - (int)LOG_M, 0);
+  if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
+    G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
+  }
+  if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
+    G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
+  }
+  guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0, "Sanity");
+}
+
 void HeapRegionRemSet::init_for_par_iteration() {
   _iter_state = Unclaimed;
 }
@@ -1385,7 +1408,7 @@
   os::sleep(Thread::current(), (jlong)5000, false);
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
-  // Run with "-XX:G1LogRSRegionEntries=2", so that 1 and 5 end up in same
+  // Run with "-XX:G1LogRSetRegionEntries=2", so that 1 and 5 end up in same
   // hash bucket.
   HeapRegion* hr0 = g1h->region_at(0);
   HeapRegion* hr1 = g1h->region_at(1);
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -187,7 +187,8 @@
   void clear_outgoing_entries();
 
   enum ParIterState { Unclaimed, Claimed, Complete };
-  ParIterState _iter_state;
+  volatile ParIterState _iter_state;
+  volatile jlong _iter_claimed;
 
   // Unused unless G1RecordHRRSOops is true.
 
@@ -209,6 +210,7 @@
                    HeapRegion* hr);
 
   static int num_par_rem_sets();
+  static void setup_remset_size();
 
   HeapRegion* hr() const {
     return _other_regions.hr();
@@ -272,6 +274,19 @@
   // Returns "true" iff the region's iteration is complete.
   bool iter_is_complete();
 
+  // Support for claiming blocks of cards during iteration
+  void set_iter_claimed(size_t x) { _iter_claimed = (jlong)x; }
+  size_t iter_claimed() const { return (size_t)_iter_claimed; }
+  // Claim the next block of cards
+  size_t iter_claimed_next(size_t step) {
+    size_t current, next;
+    do {
+      current = iter_claimed();
+      next = current + step;
+    } while (Atomic::cmpxchg((jlong)next, &_iter_claimed, (jlong)current) != (jlong)current);
+    return current;
+  }
+
   // Initialize the given iterator to iterate over this rem set.
   void init_iterator(HeapRegionRemSetIterator* iter) const;
 
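
iter_claimed_next() above is an atomic fetch-and-add built from a
cmpxchg loop: each worker claims the half-open block
[current, current + step) of cards. A standalone model using
std::atomic instead of HotSpot's Atomic::cmpxchg (the logic is the
same):

    #include <atomic>
    #include <cstdio>

    std::atomic<long> iter_claimed{0};

    // Returns the start of an exclusive block of 'step' cards.
    long iter_claimed_next(long step) {
      long current = iter_claimed.load();
      // On failure compare_exchange_weak reloads 'current'; retry.
      while (!iter_claimed.compare_exchange_weak(current, current + step)) {
      }
      return current;
    }

    int main() {
      long a = iter_claimed_next(64);
      long b = iter_claimed_next(64);
      std::printf("%ld %ld\n", a, b);  // prints: 0 64
    }
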
--- a/src/share/vm/gc_implementation/g1/ptrQueue.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/ptrQueue.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -64,8 +64,8 @@
   while (_index == 0) {
     handle_zero_index();
   }
+
   assert(_index > 0, "postcondition");
-
   _index -= oopSize;
   _buf[byte_index_to_index((int)_index)] = ptr;
   assert(0 <= _index && _index <= _sz, "Invariant.");
@@ -73,7 +73,12 @@
 
 void PtrQueue::locking_enqueue_completed_buffer(void** buf) {
   assert(_lock->owned_by_self(), "Required.");
+
+  // We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before
+  // we acquire DirtyCardQ_CBL_mon inside enqueue_complete_buffer, as they
+  // have the same rank and we may get the "possible deadlock" message
   _lock->unlock();
+
   qset()->enqueue_complete_buffer(buf);
   // We must relock only because the caller will unlock, for the normal
   // case.
@@ -99,94 +104,139 @@
   assert(_sz > 0, "Didn't set a buffer size.");
   MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
   if (_fl_owner->_buf_free_list != NULL) {
-    void** res = _fl_owner->_buf_free_list;
-    _fl_owner->_buf_free_list = (void**)_fl_owner->_buf_free_list[0];
+    void** res = BufferNode::make_buffer_from_node(_fl_owner->_buf_free_list);
+    _fl_owner->_buf_free_list = _fl_owner->_buf_free_list->next();
     _fl_owner->_buf_free_list_sz--;
-    // Just override the next pointer with NULL, just in case we scan this part
-    // of the buffer.
-    res[0] = NULL;
     return res;
   } else {
-    return NEW_C_HEAP_ARRAY(void*, _sz);
+    // Allocate space for the BufferNode in front of the buffer.
+    char* b = NEW_C_HEAP_ARRAY(char, _sz + BufferNode::aligned_size());
+    return BufferNode::make_buffer_from_block(b);
   }
 }
 
 void PtrQueueSet::deallocate_buffer(void** buf) {
   assert(_sz > 0, "Didn't set a buffer size.");
   MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
-  buf[0] = (void*)_fl_owner->_buf_free_list;
-  _fl_owner->_buf_free_list = buf;
+  BufferNode *node = BufferNode::make_node_from_buffer(buf);
+  node->set_next(_fl_owner->_buf_free_list);
+  _fl_owner->_buf_free_list = node;
   _fl_owner->_buf_free_list_sz++;
 }
 
 void PtrQueueSet::reduce_free_list() {
+  assert(_fl_owner == this, "Free list reduction is allowed only for the owner");
   // For now we'll adopt the strategy of deleting half.
   MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag);
   size_t n = _buf_free_list_sz / 2;
   while (n > 0) {
     assert(_buf_free_list != NULL, "_buf_free_list_sz must be wrong.");
-    void** head = _buf_free_list;
-    _buf_free_list = (void**)_buf_free_list[0];
-    FREE_C_HEAP_ARRAY(void*,head);
+    void* b = BufferNode::make_block_from_node(_buf_free_list);
+    _buf_free_list = _buf_free_list->next();
+    FREE_C_HEAP_ARRAY(char, b);
+    _buf_free_list_sz--;
     n--;
   }
 }
 
-void PtrQueueSet::enqueue_complete_buffer(void** buf, size_t index, bool ignore_max_completed) {
-  // I use explicit locking here because there's a bailout in the middle.
-  _cbl_mon->lock_without_safepoint_check();
+void PtrQueue::handle_zero_index() {
+  assert(0 == _index, "Precondition.");
+  // This thread records the full buffer and allocates a new one (while
+  // holding the lock if there is one).
+  if (_buf != NULL) {
+    if (_lock) {
+      assert(_lock->owned_by_self(), "Required.");
+
+      // The current PtrQueue may be the shared dirty card queue and
+      // may be manipulated by more than one worker thread during a
+      // pause. Since enqueuing the completed buffer unlocks the
+      // Shared_DirtyCardQ_lock, more than one worker thread can race
+      // on reading the shared queue attributes (_buf and _index), and
+      // multiple threads can call into this routine for the same
+      // buffer. That would cause the completed buffer to be added to
+      // the CBL multiple times.
 
-  Thread* thread = Thread::current();
-  assert( ignore_max_completed ||
-          thread->is_Java_thread() ||
-          SafepointSynchronize::is_at_safepoint(),
-          "invariant" );
-  ignore_max_completed = ignore_max_completed || !thread->is_Java_thread();
+      // We "claim" the current buffer by caching value of _buf in
+      // a local and clearing the field while holding _lock. When
+      // _lock is released (while enqueueing the completed buffer)
+      // the thread that acquires _lock will skip this code,
+      // preventing the subsequent the multiple enqueue, and
+      // install a newly allocated buffer below.
+
+      void** buf = _buf;   // local pointer to completed buffer
+      _buf = NULL;         // clear shared _buf field
+
+      locking_enqueue_completed_buffer(buf);  // enqueue completed buffer
+
+      // While the current thread was enqueuing the buffer, another thread
+      // may have allocated a new buffer and inserted it into this pointer
+      // queue. If that happened we just return, so that the current
+      // thread doesn't overwrite the buffer allocated by the other thread
+      // and potentially lose some dirtied cards.
 
-  if (!ignore_max_completed && _max_completed_queue > 0 &&
-      _n_completed_buffers >= (size_t) _max_completed_queue) {
-    _cbl_mon->unlock();
-    bool b = mut_process_buffer(buf);
-    if (b) {
-      deallocate_buffer(buf);
-      return;
+      if (_buf != NULL) return;
+    } else {
+      if (qset()->process_or_enqueue_complete_buffer(_buf)) {
+        // Recycle the buffer. No allocation.
+        _sz = qset()->buffer_size();
+        _index = _sz;
+        return;
+      }
     }
+  }
+  // Reallocate the buffer
+  _buf = qset()->allocate_buffer();
+  _sz = qset()->buffer_size();
+  _index = _sz;
+  assert(0 <= _index && _index <= _sz, "Invariant.");
+}
 
-    // Otherwise, go ahead and enqueue the buffer.  Must reaquire the lock.
-    _cbl_mon->lock_without_safepoint_check();
+bool PtrQueueSet::process_or_enqueue_complete_buffer(void** buf) {
+  if (Thread::current()->is_Java_thread()) {
+    // We don't lock. It is fine to be epsilon-precise here.
+    if (_max_completed_queue == 0 || (_max_completed_queue > 0 &&
+        _n_completed_buffers >= _max_completed_queue + _completed_queue_padding)) {
+      bool b = mut_process_buffer(buf);
+      if (b) {
+        // True here means that the buffer hasn't been deallocated and the caller may reuse it.
+        return true;
+      }
+    }
   }
+  // The buffer will be enqueued. The caller will have to get a new one.
+  enqueue_complete_buffer(buf);
+  return false;
+}
 
-  // Here we still hold the _cbl_mon.
-  CompletedBufferNode* cbn = new CompletedBufferNode;
-  cbn->buf = buf;
-  cbn->next = NULL;
-  cbn->index = index;
+void PtrQueueSet::enqueue_complete_buffer(void** buf, size_t index) {
+  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
+  BufferNode* cbn = BufferNode::new_from_buffer(buf);
+  cbn->set_index(index);
   if (_completed_buffers_tail == NULL) {
     assert(_completed_buffers_head == NULL, "Well-formedness");
     _completed_buffers_head = cbn;
     _completed_buffers_tail = cbn;
   } else {
-    _completed_buffers_tail->next = cbn;
+    _completed_buffers_tail->set_next(cbn);
     _completed_buffers_tail = cbn;
   }
   _n_completed_buffers++;
 
-  if (!_process_completed &&
+  if (!_process_completed && _process_completed_threshold >= 0 &&
       _n_completed_buffers >= _process_completed_threshold) {
     _process_completed = true;
     if (_notify_when_complete)
-      _cbl_mon->notify_all();
+      _cbl_mon->notify();
   }
   debug_only(assert_completed_buffer_list_len_correct_locked());
-  _cbl_mon->unlock();
 }
 
 int PtrQueueSet::completed_buffers_list_length() {
   int n = 0;
-  CompletedBufferNode* cbn = _completed_buffers_head;
+  BufferNode* cbn = _completed_buffers_head;
   while (cbn != NULL) {
     n++;
-    cbn = cbn->next;
+    cbn = cbn->next();
   }
   return n;
 }
@@ -197,7 +247,7 @@
 }
 
 void PtrQueueSet::assert_completed_buffer_list_len_correct_locked() {
-  guarantee((size_t)completed_buffers_list_length() ==  _n_completed_buffers,
+  guarantee(completed_buffers_list_length() == _n_completed_buffers,
             "Completed buffer length is wrong.");
 }
 
@@ -206,12 +256,8 @@
   _sz = sz * oopSize;
 }
 
-void PtrQueueSet::set_process_completed_threshold(size_t sz) {
-  _process_completed_threshold = sz;
-}
-
-// Merge lists of buffers. Notify waiting threads if the length of the list
-// exceeds threshold. The source queue is emptied as a result. The queues
+// Merge lists of buffers. Notify the processing threads.
+// The source queue is emptied as a result. The queues
 // must share the monitor.
 void PtrQueueSet::merge_bufferlists(PtrQueueSet *src) {
   assert(_cbl_mon == src->_cbl_mon, "Should share the same lock");
@@ -223,7 +269,7 @@
   } else {
     assert(_completed_buffers_head != NULL, "Well formedness");
     if (src->_completed_buffers_head != NULL) {
-      _completed_buffers_tail->next = src->_completed_buffers_head;
+      _completed_buffers_tail->set_next(src->_completed_buffers_head);
       _completed_buffers_tail = src->_completed_buffers_tail;
     }
   }
@@ -236,31 +282,13 @@
   assert(_completed_buffers_head == NULL && _completed_buffers_tail == NULL ||
          _completed_buffers_head != NULL && _completed_buffers_tail != NULL,
          "Sanity");
-
-  if (!_process_completed &&
-      _n_completed_buffers >= _process_completed_threshold) {
-    _process_completed = true;
-    if (_notify_when_complete)
-      _cbl_mon->notify_all();
-  }
 }
 
-// Merge free lists of the two queues. The free list of the source
-// queue is emptied as a result. The queues must share the same
-// mutex that guards free lists.
-void PtrQueueSet::merge_freelists(PtrQueueSet* src) {
-  assert(_fl_lock == src->_fl_lock, "Should share the same lock");
-  MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag);
-  if (_buf_free_list != NULL) {
-    void **p = _buf_free_list;
-    while (*p != NULL) {
-      p = (void**)*p;
-    }
-    *p = src->_buf_free_list;
-  } else {
-    _buf_free_list = src->_buf_free_list;
+void PtrQueueSet::notify_if_necessary() {
+  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
+  if (_n_completed_buffers >= _process_completed_threshold || _max_completed_queue == 0) {
+    _process_completed = true;
+    if (_notify_when_complete)
+      _cbl_mon->notify();
   }
-  _buf_free_list_sz += src->_buf_free_list_sz;
-  src->_buf_free_list = NULL;
-  src->_buf_free_list_sz = 0;
 }
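
The rewritten handle_zero_index() closes a multiple-enqueue race on the
shared dirty card queue: a thread claims the full buffer by caching
_buf and clearing the field while holding the lock, so any thread that
sneaks in while the lock is dropped for the enqueue finds nothing to
enqueue. A simplified standalone model of that claim; std::mutex stands
in for the HotSpot Mutex, and all names are stand-ins:

    #include <mutex>

    struct SharedQueue {
      std::mutex lock;            // plays the role of _lock
      void**     buf = nullptr;   // plays the role of _buf

      void handle_full_buffer() {
        std::unique_lock<std::mutex> l(lock);
        void** claimed = buf;            // claim: cache the shared field...
        if (claimed == nullptr) return;  // already claimed by another thread
        buf = nullptr;                   // ...and clear it under the lock
        l.unlock();                 // the enqueue drops the lock, as in HotSpot
        enqueue(claimed);
        l.lock();
        if (buf != nullptr) return; // another thread installed a new buffer
        buf = allocate();           // otherwise install a fresh one
      }

      void   enqueue(void**) { /* hand off to the consumer */ }
      void** allocate()      { return new void*[256]; }
    };
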
--- a/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -27,8 +27,10 @@
 // the addresses of modified old-generation objects.  This type supports
 // this operation.
 
+// The definition of placement operator new(size_t, void*) is in <new>.
+#include <new>
+
 class PtrQueueSet;
-
 class PtrQueue VALUE_OBJ_CLASS_SPEC {
 
 protected:
@@ -77,7 +79,7 @@
     else enqueue_known_active(ptr);
   }
 
-  inline void handle_zero_index();
+  void handle_zero_index();
   void locking_enqueue_completed_buffer(void** buf);
 
   void enqueue_known_active(void* ptr);
@@ -126,34 +128,65 @@
 
 };
 
+class BufferNode {
+  size_t _index;
+  BufferNode* _next;
+public:
+  BufferNode() : _index(0), _next(NULL) { }
+  BufferNode* next() const     { return _next;  }
+  void set_next(BufferNode* n) { _next = n;     }
+  size_t index() const         { return _index; }
+  void set_index(size_t i)     { _index = i;    }
+
+  // Align the size of the structure to the size of the pointer
+  static size_t aligned_size() {
+    static const size_t alignment = round_to(sizeof(BufferNode), sizeof(void*));
+    return alignment;
+  }
+
+  // BufferNode is allocated before the buffer.
+  // The chunk of memory that holds both of them is a block.
+
+  // Produce a new BufferNode given a buffer.
+  static BufferNode* new_from_buffer(void** buf) {
+    return new (make_block_from_buffer(buf)) BufferNode;
+  }
+
+  // The following are the required conversion routines:
+  static BufferNode* make_node_from_buffer(void** buf) {
+    return (BufferNode*)make_block_from_buffer(buf);
+  }
+  static void** make_buffer_from_node(BufferNode *node) {
+    return make_buffer_from_block(node);
+  }
+  static void* make_block_from_node(BufferNode *node) {
+    return (void*)node;
+  }
+  static void** make_buffer_from_block(void* p) {
+    return (void**)((char*)p + aligned_size());
+  }
+  static void* make_block_from_buffer(void** p) {
+    return (void*)((char*)p - aligned_size());
+  }
+};
+
 // A PtrQueueSet represents resources common to a set of pointer queues.
 // In particular, the individual queues allocate buffers from this shared
 // set, and return completed buffers to the set.
 // All these variables are protected by the TLOQ_CBL_mon. XXX ???
 class PtrQueueSet VALUE_OBJ_CLASS_SPEC {
-
 protected:
-
-  class CompletedBufferNode: public CHeapObj {
-  public:
-    void** buf;
-    size_t index;
-    CompletedBufferNode* next;
-    CompletedBufferNode() : buf(NULL),
-      index(0), next(NULL){ }
-  };
-
   Monitor* _cbl_mon;  // Protects the fields below.
-  CompletedBufferNode* _completed_buffers_head;
-  CompletedBufferNode* _completed_buffers_tail;
-  size_t _n_completed_buffers;
-  size_t _process_completed_threshold;
+  BufferNode* _completed_buffers_head;
+  BufferNode* _completed_buffers_tail;
+  int _n_completed_buffers;
+  int _process_completed_threshold;
   volatile bool _process_completed;
 
   // This (and the interpretation of the first element as a "next"
   // pointer) are protected by the TLOQ_FL_lock.
   Mutex* _fl_lock;
-  void** _buf_free_list;
+  BufferNode* _buf_free_list;
   size_t _buf_free_list_sz;
   // Queue set can share a freelist. The _fl_owner variable
   // specifies the owner. It is set to "this" by default.
@@ -170,6 +203,7 @@
   // Maximum number of elements allowed on completed queue: after that,
   // enqueuer does the work itself.  Zero indicates no maximum.
   int _max_completed_queue;
+  int _completed_queue_padding;
 
   int completed_buffers_list_length();
   void assert_completed_buffer_list_len_correct_locked();
@@ -191,9 +225,12 @@
   // Because of init-order concerns, we can't pass these as constructor
   // arguments.
   void initialize(Monitor* cbl_mon, Mutex* fl_lock,
-                  int max_completed_queue = 0,
+                  int process_completed_threshold,
+                  int max_completed_queue,
                   PtrQueueSet *fl_owner = NULL) {
     _max_completed_queue = max_completed_queue;
+    _process_completed_threshold = process_completed_threshold;
+    _completed_queue_padding = 0;
     assert(cbl_mon != NULL && fl_lock != NULL, "Init order issue?");
     _cbl_mon = cbl_mon;
     _fl_lock = fl_lock;
@@ -208,14 +245,17 @@
   void deallocate_buffer(void** buf);
 
   // Declares that "buf" is a complete buffer.
-  void enqueue_complete_buffer(void** buf, size_t index = 0,
-                               bool ignore_max_completed = false);
+  void enqueue_complete_buffer(void** buf, size_t index = 0);
+
+  // To be invoked by the mutator.
+  bool process_or_enqueue_complete_buffer(void** buf);
 
   bool completed_buffers_exist_dirty() {
     return _n_completed_buffers > 0;
   }
 
   bool process_completed_buffers() { return _process_completed; }
+  void set_process_completed(bool x) { _process_completed = x; }
 
   bool active() { return _all_active; }
 
@@ -226,15 +266,24 @@
   // Get the buffer size.
   size_t buffer_size() { return _sz; }
 
-  // Set the number of completed buffers that triggers log processing.
-  void set_process_completed_threshold(size_t sz);
+  // Get/Set the number of completed buffers that triggers log processing.
+  void set_process_completed_threshold(int sz) { _process_completed_threshold = sz; }
+  int process_completed_threshold() const { return _process_completed_threshold; }
 
   // Must only be called at a safe point.  Indicates that the buffer free
   // list size may be reduced, if that is deemed desirable.
   void reduce_free_list();
 
-  size_t completed_buffers_num() { return _n_completed_buffers; }
+  int completed_buffers_num() { return _n_completed_buffers; }
 
   void merge_bufferlists(PtrQueueSet* src);
-  void merge_freelists(PtrQueueSet* src);
+
+  void set_max_completed_queue(int m) { _max_completed_queue = m; }
+  int max_completed_queue() { return _max_completed_queue; }
+
+  void set_completed_queue_padding(int padding) { _completed_queue_padding = padding; }
+  int completed_queue_padding() { return _completed_queue_padding; }
+
+  // Notify the consumer if the number of buffers crossed the threshold
+  void notify_if_necessary();
 };
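
BufferNode replaces the heap-allocated CompletedBufferNode by placing
the node header at the front of the same heap block as the buffer
itself, so the node/buffer conversions above are fixed-offset pointer
arithmetic and no separate node allocation is needed. A standalone
model of the layout with stand-in types (cf. aligned_size() and
make_buffer_from_block()):

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>
    #include <new>

    struct Node { Node* next; std::size_t index; };

    // Node size rounded up to pointer alignment, as in aligned_size().
    const std::size_t kHeader =
        (sizeof(Node) + sizeof(void*) - 1) & ~(sizeof(void*) - 1);

    void** buffer_from_block(void* block) {
      return (void**)((char*)block + kHeader);
    }
    void* block_from_buffer(void** buf) {
      return (void*)((char*)buf - kHeader);
    }

    int main() {
      // One block holds the node header followed by a 256-slot buffer.
      void* block = std::malloc(kHeader + 256 * sizeof(void*));
      Node*  node = new (block) Node{nullptr, 0};
      void** buf  = buffer_from_block(block);
      std::printf("%d\n", block_from_buffer(buf) == (void*)node);  // 1
      std::free(block);
    }
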
--- a/src/share/vm/gc_implementation/g1/ptrQueue.inline.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-/*
- * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- */
-
-void PtrQueue::handle_zero_index() {
-  assert(0 == _index, "Precondition.");
-  // This thread records the full buffer and allocates a new one (while
-  // holding the lock if there is one).
-  void** buf = _buf;
-  _buf = qset()->allocate_buffer();
-  _sz = qset()->buffer_size();
-  _index = _sz;
-  assert(0 <= _index && _index <= _sz, "Invariant.");
-  if (buf != NULL) {
-    if (_lock) {
-      locking_enqueue_completed_buffer(buf);
-    } else {
-      qset()->enqueue_complete_buffer(buf);
-    }
-  }
-}
--- a/src/share/vm/gc_implementation/g1/satbQueue.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/satbQueue.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -67,9 +67,9 @@
 {}
 
 void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
-                                  int max_completed_queue,
+                                  int process_completed_threshold,
                                   Mutex* lock) {
-  PtrQueueSet::initialize(cbl_mon, fl_lock, max_completed_queue);
+  PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold, -1);
   _shared_satb_queue.set_lock(lock);
   if (ParallelGCThreads > 0) {
     _par_closures = NEW_C_HEAP_ARRAY(ObjectClosure*, ParallelGCThreads);
@@ -122,12 +122,12 @@
 
 bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
                                                               int worker) {
-  CompletedBufferNode* nd = NULL;
+  BufferNode* nd = NULL;
   {
     MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
     if (_completed_buffers_head != NULL) {
       nd = _completed_buffers_head;
-      _completed_buffers_head = nd->next;
+      _completed_buffers_head = nd->next();
       if (_completed_buffers_head == NULL) _completed_buffers_tail = NULL;
       _n_completed_buffers--;
       if (_n_completed_buffers == 0) _process_completed = false;
@@ -135,9 +135,9 @@
   }
   ObjectClosure* cl = (par ? _par_closures[worker] : _closure);
   if (nd != NULL) {
-    ObjPtrQueue::apply_closure_to_buffer(cl, nd->buf, 0, _sz);
-    deallocate_buffer(nd->buf);
-    delete nd;
+    void **buf = BufferNode::make_buffer_from_node(nd);
+    ObjPtrQueue::apply_closure_to_buffer(cl, buf, 0, _sz);
+    deallocate_buffer(buf);
     return true;
   } else {
     return false;
@@ -145,13 +145,13 @@
 }
 
 void SATBMarkQueueSet::abandon_partial_marking() {
-  CompletedBufferNode* buffers_to_delete = NULL;
+  BufferNode* buffers_to_delete = NULL;
   {
     MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
     while (_completed_buffers_head != NULL) {
-      CompletedBufferNode* nd = _completed_buffers_head;
-      _completed_buffers_head = nd->next;
-      nd->next = buffers_to_delete;
+      BufferNode* nd = _completed_buffers_head;
+      _completed_buffers_head = nd->next();
+      nd->set_next(buffers_to_delete);
       buffers_to_delete = nd;
     }
     _completed_buffers_tail = NULL;
@@ -159,10 +159,9 @@
     DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
   }
   while (buffers_to_delete != NULL) {
-    CompletedBufferNode* nd = buffers_to_delete;
-    buffers_to_delete = nd->next;
-    deallocate_buffer(nd->buf);
-    delete nd;
+    BufferNode* nd = buffers_to_delete;
+    buffers_to_delete = nd->next();
+    deallocate_buffer(BufferNode::make_buffer_from_node(nd));
   }
   assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
   // So we can safely manipulate these queues.
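
On the consumer side the pattern is now: pop a BufferNode from the
completed list under the monitor, convert it back to the user buffer
outside the critical section, apply the closure, and deallocate. A
short stand-in sketch of the pop (the real code uses _cbl_mon and
deallocate_buffer):

    #include <mutex>

    struct Node { Node* next = nullptr; };

    std::mutex mon;                  // stands in for _cbl_mon
    Node* completed_head = nullptr;
    Node* completed_tail = nullptr;
    int   n_completed = 0;

    // Pop one completed node, or nullptr if the list is empty.
    Node* pop_completed() {
      std::lock_guard<std::mutex> g(mon);
      Node* nd = completed_head;
      if (nd != nullptr) {
        completed_head = nd->next;
        if (completed_head == nullptr) completed_tail = nullptr;
        --n_completed;
      }
      return nd;  // caller converts node -> buffer and processes it
    }
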
--- a/src/share/vm/gc_implementation/g1/satbQueue.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/satbQueue.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -60,8 +60,8 @@
   SATBMarkQueueSet();
 
   void initialize(Monitor* cbl_mon, Mutex* fl_lock,
-                  int max_completed_queue = 0,
-                  Mutex* lock = NULL);
+                  int process_completed_threshold,
+                  Mutex* lock);
 
   static void handle_zero_index_for_thread(JavaThread* t);
 
--- a/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -27,7 +27,7 @@
 
 #define SPARSE_PRT_VERBOSE 0
 
-#define UNROLL_CARD_LOOPS 1
+#define UNROLL_CARD_LOOPS  1
 
 void SparsePRT::init_iterator(SparsePRTIter* sprt_iter) {
     sprt_iter->init(this);
@@ -36,27 +36,32 @@
 void SparsePRTEntry::init(RegionIdx_t region_ind) {
   _region_ind = region_ind;
   _next_index = NullEntry;
+
 #if UNROLL_CARD_LOOPS
-  assert(CardsPerEntry == 4, "Assumption.  If changes, un-unroll.");
-  _cards[0] = NullEntry;
-  _cards[1] = NullEntry;
-  _cards[2] = NullEntry;
-  _cards[3] = NullEntry;
+  assert((cards_num() & (UnrollFactor - 1)) == 0, "Invalid number of cards in the entry");
+  for (int i = 0; i < cards_num(); i += UnrollFactor) {
+    _cards[i] = NullEntry;
+    _cards[i + 1] = NullEntry;
+    _cards[i + 2] = NullEntry;
+    _cards[i + 3] = NullEntry;
+  }
 #else
-  for (int i = 0; i < CardsPerEntry; i++)
+  for (int i = 0; i < cards_num(); i++)
     _cards[i] = NullEntry;
 #endif
 }
 
 bool SparsePRTEntry::contains_card(CardIdx_t card_index) const {
 #if UNROLL_CARD_LOOPS
-  assert(CardsPerEntry == 4, "Assumption.  If changes, un-unroll.");
-  if (_cards[0] == card_index) return true;
-  if (_cards[1] == card_index) return true;
-  if (_cards[2] == card_index) return true;
-  if (_cards[3] == card_index) return true;
+  assert((cards_num() & (UnrollFactor - 1)) == 0, "Invalid number of cards in the entry");
+  for (int i = 0; i < cards_num(); i += UnrollFactor) {
+    if (_cards[i] == card_index ||
+        _cards[i + 1] == card_index ||
+        _cards[i + 2] == card_index ||
+        _cards[i + 3] == card_index) return true;
+  }
 #else
-  for (int i = 0; i < CardsPerEntry; i++) {
+  for (int i = 0; i < cards_num(); i++) {
     if (_cards[i] == card_index) return true;
   }
 #endif
@@ -67,14 +72,16 @@
 int SparsePRTEntry::num_valid_cards() const {
   int sum = 0;
 #if UNROLL_CARD_LOOPS
-  assert(CardsPerEntry == 4, "Assumption.  If changes, un-unroll.");
-  if (_cards[0] != NullEntry) sum++;
-  if (_cards[1] != NullEntry) sum++;
-  if (_cards[2] != NullEntry) sum++;
-  if (_cards[3] != NullEntry) sum++;
+  assert((cards_num() & (UnrollFactor - 1)) == 0, "Invalid number of cards in the entry");
+  for (int i = 0; i < cards_num(); i += UnrollFactor) {
+    sum += (_cards[i] != NullEntry);
+    sum += (_cards[i + 1] != NullEntry);
+    sum += (_cards[i + 2] != NullEntry);
+    sum += (_cards[i + 3] != NullEntry);
+  }
 #else
-  for (int i = 0; i < CardsPerEntry; i++) {
-    if (_cards[i] != NulLEntry) sum++;
+  for (int i = 0; i < cards_num(); i++) {
+    sum += (_cards[i] != NullEntry);
   }
 #endif
   // Otherwise, we're full.
@@ -83,27 +90,27 @@
 
 SparsePRTEntry::AddCardResult SparsePRTEntry::add_card(CardIdx_t card_index) {
 #if UNROLL_CARD_LOOPS
-  assert(CardsPerEntry == 4, "Assumption.  If changes, un-unroll.");
-  CardIdx_t c = _cards[0];
-  if (c == card_index) return found;
-  if (c == NullEntry) { _cards[0] = card_index; return added; }
-  c = _cards[1];
-  if (c == card_index) return found;
-  if (c == NullEntry) { _cards[1] = card_index; return added; }
-  c = _cards[2];
-  if (c == card_index) return found;
-  if (c == NullEntry) { _cards[2] = card_index; return added; }
-  c = _cards[3];
-  if (c == card_index) return found;
-  if (c == NullEntry) { _cards[3] = card_index; return added; }
+  assert((cards_num() & (UnrollFactor - 1)) == 0, "Invalid number of cards in the entry");
+  CardIdx_t c;
+  for (int i = 0; i < cards_num(); i += UnrollFactor) {
+    c = _cards[i];
+    if (c == card_index) return found;
+    if (c == NullEntry) { _cards[i] = card_index; return added; }
+    c = _cards[i + 1];
+    if (c == card_index) return found;
+    if (c == NullEntry) { _cards[i + 1] = card_index; return added; }
+    c = _cards[i + 2];
+    if (c == card_index) return found;
+    if (c == NullEntry) { _cards[i + 2] = card_index; return added; }
+    c = _cards[i + 3];
+    if (c == card_index) return found;
+    if (c == NullEntry) { _cards[i + 3] = card_index; return added; }
+  }
 #else
-  for (int i = 0; i < CardsPerEntry; i++) {
+  for (int i = 0; i < cards_num(); i++) {
     CardIdx_t c = _cards[i];
     if (c == card_index) return found;
-    if (c == NullEntry) {
-      _cards[i] = card_index;
-      return added;
-    }
+    if (c == NullEntry) { _cards[i] = card_index; return added; }
   }
 #endif
   // Otherwise, we're full.
@@ -112,13 +119,15 @@
 
 void SparsePRTEntry::copy_cards(CardIdx_t* cards) const {
 #if UNROLL_CARD_LOOPS
-  assert(CardsPerEntry == 4, "Assumption.  If changes, un-unroll.");
-  cards[0] = _cards[0];
-  cards[1] = _cards[1];
-  cards[2] = _cards[2];
-  cards[3] = _cards[3];
+  assert((cards_num() & (UnrollFactor - 1)) == 0, "Invalid number of cards in the entry");
+  for (int i = 0; i < cards_num(); i += UnrollFactor) {
+    cards[i] = _cards[i];
+    cards[i + 1] = _cards[i + 1];
+    cards[i + 2] = _cards[i + 2];
+    cards[i + 3] = _cards[i + 3];
+  }
 #else
-  for (int i = 0; i < CardsPerEntry; i++) {
+  for (int i = 0; i < cards_num(); i++) {
     cards[i] = _cards[i];
   }
 #endif
@@ -133,7 +142,7 @@
 RSHashTable::RSHashTable(size_t capacity) :
   _capacity(capacity), _capacity_mask(capacity-1),
   _occupied_entries(0), _occupied_cards(0),
-  _entries(NEW_C_HEAP_ARRAY(SparsePRTEntry, capacity)),
+  _entries((SparsePRTEntry*)NEW_C_HEAP_ARRAY(char, SparsePRTEntry::size() * capacity)),
   _buckets(NEW_C_HEAP_ARRAY(int, capacity)),
   _free_list(NullEntry), _free_region(0)
 {
@@ -161,8 +170,8 @@
                 "_capacity too large");
 
   // This will put -1 == NullEntry in the key field of all entries.
-  memset(_entries, -1, _capacity * sizeof(SparsePRTEntry));
-  memset(_buckets, -1, _capacity * sizeof(int));
+  memset(_entries, NullEntry, _capacity * SparsePRTEntry::size());
+  memset(_buckets, NullEntry, _capacity * sizeof(int));
   _free_list = NullEntry;
   _free_region = 0;
 }
@@ -175,8 +184,8 @@
   if (res == SparsePRTEntry::added) _occupied_cards++;
 #if SPARSE_PRT_VERBOSE
   gclog_or_tty->print_cr("       after add_card[%d]: valid-cards = %d.",
-                pointer_delta(e, _entries, sizeof(SparsePRTEntry)),
-                e->num_valid_cards());
+                         pointer_delta(e, _entries, SparsePRTEntry::size()),
+                         e->num_valid_cards());
 #endif
   assert(e->num_valid_cards() > 0, "Postcondition");
   return res != SparsePRTEntry::overflow;
@@ -199,6 +208,22 @@
   return true;
 }
 
+SparsePRTEntry* RSHashTable::get_entry(RegionIdx_t region_ind) {
+  int ind = (int) (region_ind & capacity_mask());
+  int cur_ind = _buckets[ind];
+  SparsePRTEntry* cur;
+  while (cur_ind != NullEntry &&
+         (cur = entry(cur_ind))->r_ind() != region_ind) {
+    cur_ind = cur->next_index();
+  }
+
+  if (cur_ind == NullEntry) return NULL;
+  // Otherwise...
+  assert(cur->r_ind() == region_ind, "Postcondition of loop + test above.");
+  assert(cur->num_valid_cards() > 0, "Inv");
+  return cur;
+}
+
 bool RSHashTable::delete_entry(RegionIdx_t region_ind) {
   int ind = (int) (region_ind & capacity_mask());
   int* prev_loc = &_buckets[ind];
@@ -225,20 +250,8 @@
   int ind = (int) (region_ind & capacity_mask());
   int cur_ind = _buckets[ind];
   SparsePRTEntry* cur;
-  // XXX
-  // int k = 0;
   while (cur_ind != NullEntry &&
          (cur = entry(cur_ind))->r_ind() != region_ind) {
-    /*
-    k++;
-    if (k > 10) {
-      gclog_or_tty->print_cr("RSHashTable::entry_for_region_ind(%d): "
-                    "k = %d, cur_ind = %d.", region_ind, k, cur_ind);
-      if (k >= 1000) {
-        while (1) ;
-      }
-    }
-    */
     cur_ind = cur->next_index();
   }
 
@@ -319,7 +332,7 @@
 bool /* RSHashTable:: */ RSHashTableIter::has_next(size_t& card_index) {
   _card_ind++;
   CardIdx_t ci;
-  if (_card_ind < SparsePRTEntry::CardsPerEntry &&
+  if (_card_ind < SparsePRTEntry::cards_num() &&
       ((ci = _rsht->entry(_bl_ind)->card(_card_ind)) !=
        SparsePRTEntry::NullEntry)) {
     card_index = compute_card_ind(ci);
@@ -359,7 +372,7 @@
 
 size_t RSHashTable::mem_size() const {
   return sizeof(this) +
-    capacity() * (sizeof(SparsePRTEntry) + sizeof(int));
+    capacity() * (SparsePRTEntry::size() + sizeof(int));
 }
 
 // ----------------------------------------------------------------------
@@ -446,6 +459,10 @@
   return _next->get_cards(region_id, cards);
 }
 
+SparsePRTEntry* SparsePRT::get_entry(RegionIdx_t region_id) {
+  return _next->get_entry(region_id);
+}
+
 bool SparsePRT::delete_entry(RegionIdx_t region_id) {
   return _next->delete_entry(region_id);
 }
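
The rewritten SparsePRTEntry loops keep the manual unroll-by-4 but now
work for any card count that is a multiple of UnrollFactor, which the
asserts above enforce. A standalone illustration of the idiom:

    #include <cstdio>

    enum { UnrollFactor = 4, NullEntry = -1 };

    // Precondition (mirrored from the asserts above): n % UnrollFactor == 0.
    int count_valid(const int* cards, int n) {
      int sum = 0;
      for (int i = 0; i < n; i += UnrollFactor) {
        // Four slots per iteration; no remainder loop is needed.
        sum += (cards[i]     != NullEntry);
        sum += (cards[i + 1] != NullEntry);
        sum += (cards[i + 2] != NullEntry);
        sum += (cards[i + 3] != NullEntry);
      }
      return sum;
    }

    int main() {
      int cards[8] = { 5, NullEntry, 7, NullEntry,
                       NullEntry, NullEntry, 9, NullEntry };
      std::printf("%d\n", count_valid(cards, 8));  // prints 3
    }
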
--- a/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -32,21 +32,28 @@
 // insertions only enqueue old versions for deletions, but do not delete
 // old versions synchronously.
 
-
 class SparsePRTEntry: public CHeapObj {
 public:
-
   enum SomePublicConstants {
-    CardsPerEntry =  4,
-    NullEntry     = -1
+    NullEntry     = -1,
+    UnrollFactor  =  4
   };
-
 private:
   RegionIdx_t _region_ind;
   int         _next_index;
-  CardIdx_t   _cards[CardsPerEntry];
-
+  CardIdx_t   _cards[1];
+  // WARNING: Don't put any data members beyond this line. The card
+  // array has, in fact, variable length and must always be the last
+  // data member.
 public:
+  // Returns the size of the entry, used for entry allocation.
+  static size_t size() { return sizeof(SparsePRTEntry) + sizeof(CardIdx_t) * (cards_num() - 1); }
+  // Returns the size of the card array.
+  static int cards_num() {
+    // The number of cards should be a multiple of 4, because that's our current
+    // unrolling factor.
+    static const int s = MAX2<int>(G1RSetSparseRegionEntries & ~(UnrollFactor - 1), UnrollFactor);
+    return s;
+  }
 
   // Set the region_ind to the given value, and delete all cards.
   inline void init(RegionIdx_t region_ind);
@@ -134,12 +141,15 @@
   bool add_card(RegionIdx_t region_id, CardIdx_t card_index);
 
   bool get_cards(RegionIdx_t region_id, CardIdx_t* cards);
+
   bool delete_entry(RegionIdx_t region_id);
 
   bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const;
 
   void add_entry(SparsePRTEntry* e);
 
+  SparsePRTEntry* get_entry(RegionIdx_t region_id);
+
   void clear();
 
   size_t capacity() const      { return _capacity;       }
@@ -148,7 +158,7 @@
   size_t occupied_cards() const   { return _occupied_cards;   }
   size_t mem_size() const;
 
-  SparsePRTEntry* entry(int i) const { return &_entries[i]; }
+  SparsePRTEntry* entry(int i) const { return (SparsePRTEntry*)((char*)_entries + SparsePRTEntry::size() * i); }
 
   void print();
 };
@@ -157,7 +167,7 @@
 class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
   int _tbl_ind;         // [-1, 0.._rsht->_capacity)
   int _bl_ind;          // [-1, 0.._rsht->_capacity)
-  short _card_ind;      // [0..CardsPerEntry)
+  short _card_ind;      // [0..SparsePRTEntry::cards_num())
   RSHashTable* _rsht;
   size_t _heap_bot_card_ind;
 
@@ -176,7 +186,7 @@
   RSHashTableIter(size_t heap_bot_card_ind) :
     _tbl_ind(RSHashTable::NullEntry),
     _bl_ind(RSHashTable::NullEntry),
-    _card_ind((SparsePRTEntry::CardsPerEntry-1)),
+    _card_ind((SparsePRTEntry::cards_num() - 1)),
     _rsht(NULL),
     _heap_bot_card_ind(heap_bot_card_ind)
   {}
@@ -185,7 +195,7 @@
     _rsht = rsht;
     _tbl_ind = -1; // So that first increment gets to 0.
     _bl_ind = RSHashTable::NullEntry;
-    _card_ind = (SparsePRTEntry::CardsPerEntry-1);
+    _card_ind = (SparsePRTEntry::cards_num() - 1);
   }
 
   bool has_next(size_t& card_index);
@@ -241,9 +251,13 @@
 
   // If the table holds an entry for "region_ind", copies its
   // cards into "cards", which must be an array of length at least
-  // "CardsPerEntry", and returns "true"; otherwise, returns "false".
+  // "SparePRTEntry::cards_num()", and returns "true"; otherwise,
+  // returns "false".
   bool get_cards(RegionIdx_t region_ind, CardIdx_t* cards);
 
+  // Returns a pointer to the entry associated with the given region,
+  // or NULL if there is no such entry.
+  SparsePRTEntry* get_entry(RegionIdx_t region_ind);
+
   // If there is an entry for "region_ind", removes it and return "true";
   // otherwise returns "false."
   bool delete_entry(RegionIdx_t region_ind);
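
SparsePRTEntry is now a variable-length structure: the declared
_cards[1] is only a placeholder, the real allocation reserves size()
bytes, and RSHashTable::entry(i) indexes the table by that byte stride
rather than by sizeof(SparsePRTEntry). A standalone model of the trick
(the card count here is illustrative; HotSpot derives it from
G1RSetSparseRegionEntries):

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    struct Entry {
      int region;
      int cards[1];  // really kCards entries; must stay the last member
    };

    const int kCards = 8;  // stand-in for SparsePRTEntry::cards_num()

    std::size_t entry_size() {            // cf. SparsePRTEntry::size()
      return sizeof(Entry) + sizeof(int) * (kCards - 1);
    }

    Entry* entry_at(char* table, int i) { // cf. RSHashTable::entry(i)
      return (Entry*)(table + entry_size() * i);
    }

    int main() {
      char* table = (char*)std::calloc(16, entry_size());
      entry_at(table, 3)->cards[kCards - 1] = 42;  // last card of entry 3
      std::printf("%d\n", entry_at(table, 3)->cards[kCards - 1]);
      std::free(table);
    }
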
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -42,7 +42,7 @@
 void VM_G1IncCollectionPause::doit() {
   JvmtiGCForAllocationMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  GCCauseSetter x(g1h, GCCause::_g1_inc_collection_pause);
+  GCCauseSetter x(g1h, _gc_cause);
   g1h->do_collection_pause_at_safepoint();
 }
 
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -68,8 +68,9 @@
 
 class VM_G1IncCollectionPause: public VM_GC_Operation {
  public:
-  VM_G1IncCollectionPause(int gc_count_before) :
-    VM_GC_Operation(gc_count_before) {}
+  VM_G1IncCollectionPause(int gc_count_before,
+                          GCCause::Cause gc_cause = GCCause::_g1_inc_collection_pause) :
+    VM_GC_Operation(gc_count_before) { _gc_cause = gc_cause; }
   virtual VMOp_Type type() const { return VMOp_G1IncCollectionPause; }
   virtual void doit();
   virtual const char* name() const {
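
With the cause now a constructor parameter, callers other than the
allocation-failure path can request an incremental pause while
recording why it happened. A hypothetical call site; gc_count_before
and the chosen cause are illustrative, and only the constructor shape
comes from this change:

    // The default cause remains GCCause::_g1_inc_collection_pause
    // when none is given.
    VM_G1IncCollectionPause op(gc_count_before, GCCause::_gc_locker);
    VMThread::execute(&op);
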
--- a/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep	Wed Mar 24 17:16:33 2010 -0700
@@ -221,6 +221,7 @@
 freeList.cpp                            globals.hpp
 freeList.cpp                            mutex.hpp
 freeList.cpp                            sharedHeap.hpp
+freeList.cpp                            vmThread.hpp
 
 freeList.hpp                            allocationStats.hpp
 
--- a/src/share/vm/gc_implementation/includeDB_gc_g1	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/includeDB_gc_g1	Wed Mar 24 17:16:33 2010 -0700
@@ -109,7 +109,6 @@
 dirtyCardQueue.cpp                      dirtyCardQueue.hpp
 dirtyCardQueue.cpp			heapRegionRemSet.hpp
 dirtyCardQueue.cpp                      mutexLocker.hpp
-dirtyCardQueue.cpp                      ptrQueue.inline.hpp
 dirtyCardQueue.cpp                      safepoint.hpp
 dirtyCardQueue.cpp                      thread.hpp
 dirtyCardQueue.cpp                      thread_<os_family>.inline.hpp
@@ -222,6 +221,15 @@
 g1MarkSweep.hpp                         timer.hpp
 g1MarkSweep.hpp                         universe.hpp
 
+g1MemoryPool.cpp                        heapRegion.hpp
+g1MemoryPool.cpp                        g1CollectedHeap.inline.hpp
+g1MemoryPool.cpp                        g1CollectedHeap.hpp
+g1MemoryPool.cpp                        g1CollectorPolicy.hpp
+g1MemoryPool.cpp                        g1MemoryPool.hpp
+
+g1MemoryPool.hpp                        memoryUsage.hpp
+g1MemoryPool.hpp                        memoryPool.hpp
+
 g1OopClosures.inline.hpp		concurrentMark.hpp
 g1OopClosures.inline.hpp		g1OopClosures.hpp
 g1OopClosures.inline.hpp		g1CollectedHeap.hpp
@@ -303,12 +311,13 @@
 
 klass.hpp				g1OopClosures.hpp
 
+memoryService.cpp                       g1MemoryPool.hpp
+
 ptrQueue.cpp                            allocation.hpp
 ptrQueue.cpp                            allocation.inline.hpp
 ptrQueue.cpp                            mutex.hpp
 ptrQueue.cpp                            mutexLocker.hpp
 ptrQueue.cpp                            ptrQueue.hpp
-ptrQueue.cpp                            ptrQueue.inline.hpp
 ptrQueue.cpp                            thread_<os_family>.inline.hpp
 
 ptrQueue.hpp                            allocation.hpp
@@ -318,7 +327,6 @@
 
 satbQueue.cpp                           allocation.inline.hpp
 satbQueue.cpp                           mutexLocker.hpp
-satbQueue.cpp                           ptrQueue.inline.hpp
 satbQueue.cpp                           satbQueue.hpp
 satbQueue.cpp                           sharedHeap.hpp
 satbQueue.cpp                           thread.hpp
--- a/src/share/vm/gc_implementation/includeDB_gc_serial	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/includeDB_gc_serial	Wed Mar 24 17:16:33 2010 -0700
@@ -71,6 +71,7 @@
 gcUtil.hpp                              allocation.hpp
 gcUtil.hpp                              debug.hpp
 gcUtil.hpp                              globalDefinitions.hpp
+gcUtil.hpp                              ostream.hpp
 gcUtil.hpp				timer.hpp
 
 generationCounters.cpp                  generationCounters.hpp
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -50,6 +50,7 @@
                       work_queue_set_, &term_),
   _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
   _keep_alive_closure(&_scan_weak_ref_closure),
+  _promotion_failure_size(0),
   _pushes(0), _pops(0), _steals(0), _steal_attempts(0), _term_attempts(0),
   _strong_roots_time(0.0), _term_time(0.0)
 {
@@ -249,6 +250,16 @@
   }
 }
 
+void ParScanThreadState::print_and_clear_promotion_failure_size() {
+  if (_promotion_failure_size != 0) {
+    if (PrintPromotionFailure) {
+      gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
+        _thread_num, _promotion_failure_size);
+    }
+    _promotion_failure_size = 0;
+  }
+}
+
 class ParScanThreadStateSet: private ResourceArray {
 public:
   // Initializes states for the specified number of threads;
@@ -260,11 +271,11 @@
                         GrowableArray<oop>**    overflow_stacks_,
                         size_t                  desired_plab_sz,
                         ParallelTaskTerminator& term);
-  inline ParScanThreadState& thread_sate(int i);
+  inline ParScanThreadState& thread_state(int i);
   int pushes() { return _pushes; }
   int pops()   { return _pops; }
   int steals() { return _steals; }
-  void reset();
+  void reset(bool promotion_failed);
   void flush();
 private:
   ParallelTaskTerminator& _term;
@@ -295,22 +306,31 @@
   }
 }
 
-inline ParScanThreadState& ParScanThreadStateSet::thread_sate(int i)
+inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
 {
   assert(i >= 0 && i < length(), "sanity check!");
   return ((ParScanThreadState*)_data)[i];
 }
 
 
-void ParScanThreadStateSet::reset()
+void ParScanThreadStateSet::reset(bool promotion_failed)
 {
   _term.reset_for_reuse();
+  if (promotion_failed) {
+    for (int i = 0; i < length(); ++i) {
+      thread_state(i).print_and_clear_promotion_failure_size();
+    }
+  }
 }
 
 void ParScanThreadStateSet::flush()
 {
+  // Work in this loop should be kept as lightweight as
+  // possible, since it might otherwise become a bottleneck
+  // to scaling. If heavy-weight work is ever added to this
+  // loop, consider parallelizing it across the worker threads.
   for (int i = 0; i < length(); ++i) {
-    ParScanThreadState& par_scan_state = thread_sate(i);
+    ParScanThreadState& par_scan_state = thread_state(i);
 
     // Flush stats related to To-space PLAB activity and
     // retire the last buffer.
@@ -362,6 +382,14 @@
       }
     }
   }
+  if (UseConcMarkSweepGC && ParallelGCThreads > 0) {
+    // We need to call this even when ResizeOldPLAB is disabled
+    // so as to avoid breaking some asserts. While we may be able
+    // to avoid this by reorganizing the code a bit, I am loath
+    // to do that unless we find cases where ergo leads to bad
+    // performance.
+    CFLS_LAB::compute_desired_plab_size();
+  }
 }
 
 ParScanClosure::ParScanClosure(ParNewGeneration* g,
@@ -475,7 +503,7 @@
 
   Generation* old_gen = gch->next_gen(_gen);
 
-  ParScanThreadState& par_scan_state = _state_set->thread_sate(i);
+  ParScanThreadState& par_scan_state = _state_set->thread_state(i);
   par_scan_state.set_young_old_boundary(_young_old_boundary);
 
   par_scan_state.start_strong_roots();
@@ -659,7 +687,7 @@
 {
   ResourceMark rm;
   HandleMark hm;
-  ParScanThreadState& par_scan_state = _state_set.thread_sate(i);
+  ParScanThreadState& par_scan_state = _state_set.thread_state(i);
   par_scan_state.set_young_old_boundary(_young_old_boundary);
   _task.work(i, par_scan_state.is_alive_closure(),
              par_scan_state.keep_alive_closure(),
@@ -693,7 +721,7 @@
   ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
                                  _generation.reserved().end(), _state_set);
   workers->run_task(&rp_task);
-  _state_set.reset();
+  _state_set.reset(_generation.promotion_failed());
 }
 
 void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
@@ -813,7 +841,7 @@
     GenCollectedHeap::StrongRootsScope srs(gch);
     tsk.work(0);
   }
-  thread_state_set.reset();
+  thread_state_set.reset(promotion_failed());
 
   if (PAR_STATS_ENABLED && ParallelGCVerbose) {
     gclog_or_tty->print("Thread totals:\n"
@@ -882,6 +910,8 @@
     swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
     from()->set_next_compaction_space(to());
     gch->set_incremental_collection_will_fail();
+    // Inform the next generation that a promotion failure occurred.
+    _next_gen->promotion_failure_occurred();
 
     // Reset the PromotionFailureALot counters.
     NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
@@ -1029,6 +1059,8 @@
       new_obj = old;
 
       preserve_mark_if_necessary(old, m);
+      // Log the size of the maiden promotion failure
+      par_scan_state->log_promotion_failure(sz);
     }
 
     old->forward_to(new_obj);
@@ -1150,6 +1182,8 @@
       failed_to_promote = true;
 
       preserve_mark_if_necessary(old, m);
+      // Log the size of the maiden promotion failure
+      par_scan_state->log_promotion_failure(sz);
     }
   } else {
     // Is in to-space; do copying ourselves.
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -97,6 +97,9 @@
   int _pushes, _pops, _steals, _steal_attempts, _term_attempts;
   int _overflow_pushes, _overflow_refills, _overflow_refill_objs;
 
+  // Stats for promotion failure
+  size_t _promotion_failure_size;
+
   // Timing numbers.
   double _start;
   double _start_strong_roots;
@@ -169,6 +172,15 @@
   // Undo the most recent allocation ("obj", of "word_sz").
   void undo_alloc_in_to_space(HeapWord* obj, size_t word_sz);
 
+  // Promotion failure stats
+  size_t promotion_failure_size() { return _promotion_failure_size; }
+  void log_promotion_failure(size_t sz) {
+    if (_promotion_failure_size == 0) {
+      _promotion_failure_size = sz;
+    }
+  }
+  void print_and_clear_promotion_failure_size();
+
   int pushes() { return _pushes; }
   int pops()   { return _pops; }
   int steals() { return _steals; }
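
A note on the new statistic: log_promotion_failure() above records only the
first (maiden) failure size per thread, and
print_and_clear_promotion_failure_size() reports it under
-XX:+PrintPromotionFailure using the format added in parNewGeneration.cpp
above. A hypothetical log fragment (thread number and size invented purely
for illustration) would look like:

 (2: promotion failure size = 52)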
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -51,6 +51,8 @@
 }
 
 jint ParallelScavengeHeap::initialize() {
+  CollectedHeap::pre_initialize();
+
   // Cannot be initialized until after the flags are parsed
   GenerationSizer flag_parser;
 
@@ -717,10 +719,6 @@
   return young_gen()->allocate(size, true);
 }
 
-void ParallelScavengeHeap::fill_all_tlabs(bool retire) {
-  CollectedHeap::fill_all_tlabs(retire);
-}
-
 void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
   CollectedHeap::accumulate_statistics_all_tlabs();
 }
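
A minimal sketch of the new initialization contract, assuming a hypothetical
heap subclass MyHeap; the one requirement is that
CollectedHeap::pre_initialize() runs before any heap-specific setup, exactly
as ParallelScavengeHeap::initialize() now does above:

jint MyHeap::initialize() {          // MyHeap is hypothetical
  CollectedHeap::pre_initialize();   // settles _defer_initial_card_mark first
  // ... reserve and commit generations as before ...
  return JNI_OK;
}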
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -54,7 +54,6 @@
  protected:
   static inline size_t total_invocations();
   HeapWord* allocate_new_tlab(size_t size);
-  void fill_all_tlabs(bool retire);
 
  public:
   ParallelScavengeHeap() : CollectedHeap() {
@@ -191,6 +190,10 @@
     return true;
   }
 
+  virtual bool card_mark_must_follow_store() const {
+    return false;
+  }
+
   // Return true if we don't need a store barrier for
   // initializing stores to an object at this address.
   virtual bool can_elide_initializing_store_barrier(oop new_obj);
--- a/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -51,7 +51,7 @@
 
     cname = PerfDataManager::counter_name(name_space(), "oldCapacity");
     _old_capacity = PerfDataManager::create_variable(SUN_GC, cname,
-      PerfData::U_Bytes, (jlong) Arguments::initial_heap_size(), CHECK);
+      PerfData::U_Bytes, (jlong) InitialHeapSize, CHECK);
 
     cname = PerfDataManager::counter_name(name_space(), "boundaryMoved");
     _boundary_moved = PerfDataManager::create_variable(SUN_GC, cname,
--- a/src/share/vm/gc_implementation/shared/allocationStats.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/shared/allocationStats.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -31,7 +31,7 @@
   // beginning of this sweep:
   //   Count(end_last_sweep) - Count(start_this_sweep)
   //     + splitBirths(between) - splitDeaths(between)
-  // The above number divided by the time since the start [END???] of the
+  // The above number divided by the time since the end of the
   // previous sweep gives us a time rate of demand for blocks
   // of this size. We compute a padded average of this rate as
   // our current estimate for the time rate of demand for blocks
@@ -41,7 +41,7 @@
   // estimates.
   AdaptivePaddedAverage _demand_rate_estimate;
 
-  ssize_t     _desired;          // Estimate computed as described above
+  ssize_t     _desired;         // Demand estimate computed as described above
   ssize_t     _coalDesired;     // desired +/- small-percent for tuning coalescing
 
   ssize_t     _surplus;         // count - (desired +/- small-percent),
@@ -53,9 +53,9 @@
   ssize_t     _coalDeaths;      // loss from coalescing
   ssize_t     _splitBirths;     // additional chunks from splitting
   ssize_t     _splitDeaths;     // loss from splitting
-  size_t     _returnedBytes;    // number of bytes returned to list.
+  size_t      _returnedBytes;   // number of bytes returned to list.
  public:
-  void initialize() {
+  void initialize(bool split_birth = false) {
     AdaptivePaddedAverage* dummy =
       new (&_demand_rate_estimate) AdaptivePaddedAverage(CMS_FLSWeight,
                                                          CMS_FLSPadding);
@@ -67,7 +67,7 @@
     _beforeSweep = 0;
     _coalBirths = 0;
     _coalDeaths = 0;
-    _splitBirths = 0;
+    _splitBirths = split_birth ? 1 : 0;
     _splitDeaths = 0;
     _returnedBytes = 0;
   }
@@ -75,10 +75,12 @@
   AllocationStats() {
     initialize();
   }
+
   // The rate estimate is in blocks per second.
   void compute_desired(size_t count,
                        float inter_sweep_current,
-                       float inter_sweep_estimate) {
+                       float inter_sweep_estimate,
+                       float intra_sweep_estimate) {
     // If the latest inter-sweep time is below our granularity
     // of measurement, we may call in here with
     // inter_sweep_current == 0. However, even for suitably small
@@ -88,12 +90,31 @@
     // vulnerable to noisy glitches. In such cases, we
     // ignore the current sample and use currently available
     // historical estimates.
+    // XXX NEEDS TO BE FIXED
+    // assert(prevSweep() + splitBirths() >= splitDeaths() + (ssize_t)count, "Conservation Principle");
+    //     ^^^^^^^^^^^^^^^^^^^^^^^^^^^    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+    //     "Total Stock"                  "Not used at this block size"
     if (inter_sweep_current > _threshold) {
-      ssize_t demand = prevSweep() - count + splitBirths() - splitDeaths();
+      ssize_t demand = prevSweep() - (ssize_t)count + splitBirths() - splitDeaths();
+      // XXX NEEDS TO BE FIXED
+      // assert(demand >= 0, "Demand should be non-negative");
+      // Defensive: adjust for imprecision in event counting
+      if (demand < 0) {
+        demand = 0;
+      }
+      float old_rate = _demand_rate_estimate.padded_average();
       float rate = ((float)demand)/inter_sweep_current;
       _demand_rate_estimate.sample(rate);
-      _desired = (ssize_t)(_demand_rate_estimate.padded_average()
-                           *inter_sweep_estimate);
+      float new_rate = _demand_rate_estimate.padded_average();
+      ssize_t old_desired = _desired;
+      _desired = (ssize_t)(new_rate * (inter_sweep_estimate
+                                       + (CMSExtrapolateSweep
+                                          ? intra_sweep_estimate
+                                          : 0.0)));
+      if (PrintFLSStatistics > 1) {
+        gclog_or_tty->print_cr("demand: %d, old_rate: %f, current_rate: %f, new_rate: %f, old_desired: %d, new_desired: %d",
+                                demand,     old_rate,     rate,             new_rate,     old_desired,     _desired);
+      }
     }
   }
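
A worked example of the computation above, with invented numbers: suppose
prevSweep() = 100 blocks, count = 40, splitBirths() = 10 and
splitDeaths() = 5, so demand = 100 - 40 + 10 - 5 = 65 blocks. With
inter_sweep_current = 2.0s the sampled rate is 65 / 2.0 = 32.5 blocks/s. If
the padded average then reads, say, 30 blocks/s, and
inter_sweep_estimate = 2.2s, intra_sweep_estimate = 0.3s with
CMSExtrapolateSweep enabled, the new _desired is
(ssize_t)(30 * (2.2 + 0.3)) = 75 blocks.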
 
--- a/src/share/vm/gc_implementation/shared/gcUtil.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/shared/gcUtil.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -52,11 +52,35 @@
   _last_sample = new_sample;
 }
 
+void AdaptiveWeightedAverage::print() const {
+  print_on(tty);
+}
+
+void AdaptiveWeightedAverage::print_on(outputStream* st) const {
+  guarantee(false, "NYI");
+}
+
+void AdaptivePaddedAverage::print() const {
+  print_on(tty);
+}
+
+void AdaptivePaddedAverage::print_on(outputStream* st) const {
+  guarantee(false, "NYI");
+}
+
+void AdaptivePaddedNoZeroDevAverage::print() const {
+  print_on(tty);
+}
+
+void AdaptivePaddedNoZeroDevAverage::print_on(outputStream* st) const {
+  guarantee(false, "NYI");
+}
+
 void AdaptivePaddedAverage::sample(float new_sample) {
-  // Compute our parent classes sample information
+  // Compute new adaptive weighted average based on new sample.
   AdaptiveWeightedAverage::sample(new_sample);
 
-  // Now compute the deviation and the new padded sample
+  // Now update the deviation and the padded average.
   float new_avg = average();
   float new_dev = compute_adaptive_average(fabsd(new_sample - new_avg),
                                            deviation());
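
For reference, based on the update above and the class name, the padded
average maintained here is, informally:

  deviation'      = exp_avg(|new_sample - average'|, deviation)
  padded_average' = average' + padding * deviation'

i.e. the smoothed mean biased upward by a multiple of the smoothed absolute
deviation, so consumers sizing resources against it err on the generous side.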
--- a/src/share/vm/gc_implementation/shared/gcUtil.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_implementation/shared/gcUtil.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -54,8 +54,8 @@
 
  public:
   // Input weight must be between 0 and 100
-  AdaptiveWeightedAverage(unsigned weight) :
-    _average(0.0), _sample_count(0), _weight(weight), _last_sample(0.0) {
+  AdaptiveWeightedAverage(unsigned weight, float avg = 0.0) :
+    _average(avg), _sample_count(0), _weight(weight), _last_sample(0.0) {
   }
 
   void clear() {
@@ -64,6 +64,13 @@
     _last_sample = 0;
   }
 
+  // Useful for modifying static structures after startup.
+  void  modify(size_t avg, unsigned wt, bool force = false)  {
+    assert(force, "Are you sure you want to call this?");
+    _average = (float)avg;
+    _weight  = wt;
+  }
+
   // Accessors
   float    average() const       { return _average;       }
   unsigned weight()  const       { return _weight;        }
@@ -83,6 +90,10 @@
     // Convert to float and back to avoid integer overflow.
     return (size_t)exp_avg((float)avg, (float)sample, weight);
   }
+
+  // Printing
+  void print_on(outputStream* st) const;
+  void print() const;
 };
 
 
@@ -129,6 +140,10 @@
 
   // Override
   void  sample(float new_sample);
+
+  // Printing
+  void print_on(outputStream* st) const;
+  void print() const;
 };
 
 // A weighted average that includes a deviation from the average,
@@ -146,7 +161,12 @@
     AdaptivePaddedAverage(weight, padding)  {}
   // Override
   void  sample(float new_sample);
+
+  // Printing
+  void print_on(outputStream* st) const;
+  void print() const;
 };
+
 // Use a least squares fit to a set of data to generate a linear
 // equation.
 //              y = intercept + slope * x
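
A hedged usage sketch of the two additions above (weight and values are
arbitrary; the force flag exists only to make post-startup mutation
explicit):

  AdaptiveWeightedAverage avg(/* weight */ 50, /* initial average */ 100.0f);
  avg.sample(120.0f);   // decays the average toward recent samples
  // Overwriting a statically initialized average must be deliberate:
  avg.modify(/* avg */ 200, /* wt */ 75, /* force */ true);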
--- a/src/share/vm/gc_interface/collectedHeap.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_interface/collectedHeap.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -59,8 +59,19 @@
                 PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                              80, GCCause::to_string(_gc_lastcause), CHECK);
   }
+  _defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below.
 }
 
+void CollectedHeap::pre_initialize() {
+  // Used for ReduceInitialCardMarks (when COMPILER2 is used);
+  // otherwise remains unused.
+#ifdef COMPILER2
+  _defer_initial_card_mark =    ReduceInitialCardMarks && can_elide_tlab_store_barriers()
+                             && (DeferInitialCardMark || card_mark_must_follow_store());
+#else
+  assert(_defer_initial_card_mark == false, "Who would set it?");
+#endif
+}
 
 #ifndef PRODUCT
 void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
@@ -140,12 +151,13 @@
 void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
   MemRegion deferred = thread->deferred_card_mark();
   if (!deferred.is_empty()) {
+    assert(_defer_initial_card_mark, "Otherwise should be empty");
     {
       // Verify that the storage points to a parsable object in heap
       DEBUG_ONLY(oop old_obj = oop(deferred.start());)
       assert(is_in(old_obj), "Not in allocated heap");
       assert(!can_elide_initializing_store_barrier(old_obj),
-             "Else should have been filtered in defer_store_barrier()");
+             "Else should have been filtered in new_store_pre_barrier()");
       assert(!is_in_permanent(old_obj), "Sanity: not expected");
       assert(old_obj->is_oop(true), "Not an oop");
       assert(old_obj->is_parsable(), "Will not be concurrently parsable");
@@ -174,9 +186,7 @@
 //     so long as the card-mark is completed before the next
 //     scavenge. For all these cases, we can do a card mark
 //     at the point at which we do a slow path allocation
-//     in the old gen. For uniformity, however, we end
-//     up using the same scheme (see below) for all three
-//     cases (deferring the card-mark appropriately).
+//     in the old gen, i.e. in this call.
 // (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
 //     in addition that the card-mark for an old gen allocated
 //     object strictly follow any associated initializing stores.
@@ -199,12 +209,13 @@
 //     but, like in CMS, because of the presence of concurrent refinement
 //     (much like CMS' precleaning), must strictly follow the oop-store.
 //     Thus, using the same protocol for maintaining the intended
-//     invariants turns out, serendepitously, to be the same for all
-//     three collectors/heap types above.
+//     invariants turns out, serendipitously, to be the same for both
+//     G1 and CMS.
 //
-// For each future collector, this should be reexamined with
-// that specific collector in mind.
-oop CollectedHeap::defer_store_barrier(JavaThread* thread, oop new_obj) {
+// For any future collector, this code should be reexamined with
+// that specific collector in mind, and the documentation above suitably
+// extended and updated.
+oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
   // If a previous card-mark was deferred, flush it now.
   flush_deferred_store_barrier(thread);
   if (can_elide_initializing_store_barrier(new_obj)) {
@@ -212,10 +223,17 @@
     // following the flush above.
     assert(thread->deferred_card_mark().is_empty(), "Error");
   } else {
-    // Remember info for the newly deferred store barrier
-    MemRegion deferred = MemRegion((HeapWord*)new_obj, new_obj->size());
-    assert(!deferred.is_empty(), "Error");
-    thread->set_deferred_card_mark(deferred);
+    MemRegion mr((HeapWord*)new_obj, new_obj->size());
+    assert(!mr.is_empty(), "Error");
+    if (_defer_initial_card_mark) {
+      // Defer the card mark
+      thread->set_deferred_card_mark(mr);
+    } else {
+      // Do the card mark
+      BarrierSet* bs = barrier_set();
+      assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
+      bs->write_region(mr);
+    }
   }
   return new_obj;
 }
@@ -241,9 +259,9 @@
   assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
 }
 
-void CollectedHeap::zap_filler_array(HeapWord* start, size_t words)
+void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
 {
-  if (ZapFillerObjects) {
+  if (ZapFillerObjects && zap) {
     Copy::fill_to_words(start + filler_array_hdr_size(),
                         words - filler_array_hdr_size(), 0XDEAFBABE);
   }
@@ -251,7 +269,7 @@
 #endif // ASSERT
 
 void
-CollectedHeap::fill_with_array(HeapWord* start, size_t words)
+CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
 {
   assert(words >= filler_array_min_size(), "too small for an array");
   assert(words <= filler_array_max_size(), "too big for a single object");
@@ -262,31 +280,31 @@
   // Set the length first for concurrent GC.
   ((arrayOop)start)->set_length((int)len);
   post_allocation_setup_common(Universe::intArrayKlassObj(), start, words);
-  DEBUG_ONLY(zap_filler_array(start, words);)
+  DEBUG_ONLY(zap_filler_array(start, words, zap);)
 }
 
 void
-CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words)
+CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
 {
   assert(words <= filler_array_max_size(), "too big for a single object");
 
   if (words >= filler_array_min_size()) {
-    fill_with_array(start, words);
+    fill_with_array(start, words, zap);
   } else if (words > 0) {
     assert(words == min_fill_size(), "unaligned size");
-    post_allocation_setup_common(SystemDictionary::object_klass(), start,
+    post_allocation_setup_common(SystemDictionary::Object_klass(), start,
                                  words);
   }
 }
 
-void CollectedHeap::fill_with_object(HeapWord* start, size_t words)
+void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
 {
   DEBUG_ONLY(fill_args_check(start, words);)
   HandleMark hm;  // Free handles before leaving.
-  fill_with_object_impl(start, words);
+  fill_with_object_impl(start, words, zap);
 }
 
-void CollectedHeap::fill_with_objects(HeapWord* start, size_t words)
+void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
 {
   DEBUG_ONLY(fill_args_check(start, words);)
   HandleMark hm;  // Free handles before leaving.
@@ -299,13 +317,13 @@
   const size_t max = filler_array_max_size();
   while (words > max) {
     const size_t cur = words - max >= min ? max : max - min;
-    fill_with_array(start, cur);
+    fill_with_array(start, cur, zap);
     start += cur;
     words -= cur;
   }
 #endif
 
-  fill_with_object_impl(start, words);
+  fill_with_object_impl(start, words, zap);
 }
 
 HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
@@ -313,22 +331,6 @@
   return NULL;
 }
 
-void CollectedHeap::fill_all_tlabs(bool retire) {
-  assert(UseTLAB, "should not reach here");
-  // See note in ensure_parsability() below.
-  assert(SafepointSynchronize::is_at_safepoint() ||
-         !is_init_completed(),
-         "should only fill tlabs at safepoint");
-  // The main thread starts allocating via a TLAB even before it
-  // has added itself to the threads list at vm boot-up.
-  assert(Threads::first() != NULL,
-         "Attempt to fill tlabs before main thread has been added"
-         " to threads list is doomed to failure!");
-  for(JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
-     thread->tlab().make_parsable(retire);
-  }
-}
-
 void CollectedHeap::ensure_parsability(bool retire_tlabs) {
   // The second disjunct in the assertion below makes a concession
   // for the start-up verification done while the VM is being
@@ -343,8 +345,24 @@
          "Should only be called at a safepoint or at start-up"
          " otherwise concurrent mutator activity may make heap "
          " unparsable again");
-  if (UseTLAB) {
-    fill_all_tlabs(retire_tlabs);
+  const bool use_tlab = UseTLAB;
+  const bool deferred = _defer_initial_card_mark;
+  // The main thread starts allocating via a TLAB even before it
+  // has added itself to the threads list at vm boot-up.
+  assert(!use_tlab || Threads::first() != NULL,
+         "Attempt to fill tlabs before main thread has been added"
+         " to threads list is doomed to failure!");
+  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
+     if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
+#ifdef COMPILER2
+     // The deferred store barriers must all have been flushed to the
+     // card-table (or other remembered set structure) before GC starts
+     // processing the card-table (or other remembered set).
+     if (deferred) flush_deferred_store_barrier(thread);
+#else
+     assert(!deferred, "Should be false");
+     assert(thread->deferred_card_mark().is_empty(), "Should be empty");
+#endif
   }
 }
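
Summarizing the protocol as implemented in this file (a recap of the code
above, not new behavior):

// 1. Compiled code slow-path-allocates new_obj and calls
//    new_store_pre_barrier(thread, new_obj), which first flushes any
//    card mark deferred earlier by this thread.
// 2. If the initializing-store barrier cannot be elided for new_obj,
//    either the region is recorded via thread->set_deferred_card_mark(mr)
//    (when _defer_initial_card_mark) or it is card-marked immediately
//    through barrier_set()->write_region(mr).
// 3. Any still-deferred mark is flushed in ensure_parsability(), i.e.
//    before a collection scans the card table (or other remembered set).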
 
--- a/src/share/vm/gc_interface/collectedHeap.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/gc_interface/collectedHeap.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -51,6 +51,9 @@
   // Used for filler objects (static, but initialized in ctor).
   static size_t _filler_array_max_size;
 
+  // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 is being used
+  bool _defer_initial_card_mark;
+
  protected:
   MemRegion _reserved;
   BarrierSet* _barrier_set;
@@ -70,13 +73,16 @@
   // Constructor
   CollectedHeap();
 
+  // Do common initializations that must follow instance construction,
+  // for example, those needing virtual calls.
+  // This code could perhaps be moved into initialize() but would
+  // be slightly more awkward because we want the latter to be a
+  // pure virtual.
+  void pre_initialize();
+
   // Create a new tlab
   virtual HeapWord* allocate_new_tlab(size_t size);
 
-  // Fix up tlabs to make the heap well-formed again,
-  // optionally retiring the tlabs.
-  virtual void fill_all_tlabs(bool retire);
-
   // Accumulate statistics on all tlabs.
   virtual void accumulate_statistics_all_tlabs();
 
@@ -127,14 +133,14 @@
   static inline size_t filler_array_max_size();
 
   DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
-  DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words);)
+  DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);)
 
   // Fill with a single array; caller must ensure filler_array_min_size() <=
   // words <= filler_array_max_size().
-  static inline void fill_with_array(HeapWord* start, size_t words);
+  static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);
 
   // Fill with a single object (either an int array or a java.lang.Object).
-  static inline void fill_with_object_impl(HeapWord* start, size_t words);
+  static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);
 
   // Verification functions
   virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
@@ -338,14 +344,14 @@
     return size_t(align_object_size(oopDesc::header_size()));
   }
 
-  static void fill_with_objects(HeapWord* start, size_t words);
+  static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);
 
-  static void fill_with_object(HeapWord* start, size_t words);
-  static void fill_with_object(MemRegion region) {
-    fill_with_object(region.start(), region.word_size());
+  static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
+  static void fill_with_object(MemRegion region, bool zap = true) {
+    fill_with_object(region.start(), region.word_size(), zap);
   }
-  static void fill_with_object(HeapWord* start, HeapWord* end) {
-    fill_with_object(start, pointer_delta(end, start));
+  static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
+    fill_with_object(start, pointer_delta(end, start), zap);
   }
 
   // Some heaps may offer a contiguous region for shared non-blocking
@@ -431,14 +437,25 @@
   // promises to call this function on such a slow-path-allocated
   // object before performing initializations that have elided
   // store barriers. Returns new_obj, or maybe a safer copy thereof.
-  virtual oop defer_store_barrier(JavaThread* thread, oop new_obj);
+  virtual oop new_store_pre_barrier(JavaThread* thread, oop new_obj);
 
   // Answers whether an initializing store to a new object currently
-  // allocated at the given address doesn't need a (deferred) store
+  // allocated at the given address doesn't need a store
   // barrier. Returns "true" if it doesn't need an initializing
   // store barrier; answers "false" if it does.
   virtual bool can_elide_initializing_store_barrier(oop new_obj) = 0;
 
+  // If a compiler is eliding store barriers for TLAB-allocated objects,
+  // we will be informed of a slow-path allocation by a call
+  // to new_store_pre_barrier() above. Such a call precedes the
+  // initialization of the object itself, and no post-store-barriers will
+  // be issued. Some heap types require that the barrier strictly follows
+  // the initializing stores. (This is currently implemented by deferring the
+  // barrier until the next slow-path allocation or gc-related safepoint.)
+  // This interface answers whether a particular heap type needs the card
+  // mark to be thus strictly sequenced after the stores.
+  virtual bool card_mark_must_follow_store() const = 0;
+
   // If the CollectedHeap was asked to defer a store barrier above,
   // this informs it to flush such a deferred store barrier to the
   // remembered set.
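
A hedged usage sketch of the new zap parameter (start/end are assumed to
bound a dead region); the zap pattern is written only in debug builds, and
only when ZapFillerObjects is also set:

  // Fill a region that will be overwritten immediately; skip 0XDEAFBABE:
  CollectedHeap::fill_with_object(start, end, false /* zap */);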
--- a/src/share/vm/includeDB_compiler2	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/includeDB_compiler2	Wed Mar 24 17:16:33 2010 -0700
@@ -149,11 +149,15 @@
 c2compiler.hpp                          abstractCompiler.hpp
 
 callGenerator.cpp                       addnode.hpp
+callGenerator.cpp                       bcEscapeAnalyzer.hpp
 callGenerator.cpp                       callGenerator.hpp
 callGenerator.cpp                       callnode.hpp
 callGenerator.cpp                       cfgnode.hpp
 callGenerator.cpp                       compileLog.hpp
 callGenerator.cpp                       connode.hpp
+callGenerator.cpp                       ciCPCache.hpp
+callGenerator.cpp                       ciMethodHandle.hpp
+callGenerator.cpp                       javaClasses.hpp
 callGenerator.cpp                       parse.hpp
 callGenerator.cpp                       rootnode.hpp
 callGenerator.cpp                       runtime.hpp
@@ -321,6 +325,7 @@
 compile.cpp                             rootnode.hpp
 compile.cpp                             runtime.hpp
 compile.cpp                             signature.hpp
+compile.cpp                             stringopts.hpp
 compile.cpp                             stubRoutines.hpp
 compile.cpp                             systemDictionary.hpp
 compile.cpp                             timer.hpp
@@ -389,6 +394,9 @@
 
 doCall.cpp                              addnode.hpp
 doCall.cpp                              callGenerator.hpp
+doCall.cpp                              ciCallSite.hpp
+doCall.cpp                              ciCPCache.hpp
+doCall.cpp                              ciMethodHandle.hpp
 doCall.cpp                              cfgnode.hpp
 doCall.cpp                              compileLog.hpp
 doCall.cpp                              linkResolver.hpp
@@ -476,12 +484,16 @@
 graphKit.cpp                            runtime.hpp
 graphKit.cpp                            sharedRuntime.hpp
 
+graphKit.hpp                            addnode.hpp
 graphKit.hpp                            callnode.hpp
 graphKit.hpp                            cfgnode.hpp
 graphKit.hpp                            ciEnv.hpp
+graphKit.hpp                            divnode.hpp
 graphKit.hpp                            compile.hpp
 graphKit.hpp                            deoptimization.hpp
 graphKit.hpp                            phaseX.hpp
+graphKit.hpp                            mulnode.hpp
+graphKit.hpp                            subnode.hpp
 graphKit.hpp                            type.hpp
 
 idealKit.cpp                            addnode.hpp
@@ -490,7 +502,10 @@
 idealKit.cpp                            idealKit.hpp
 idealKit.cpp				runtime.hpp
 
+idealKit.hpp                            addnode.hpp
+idealKit.hpp                            cfgnode.hpp
 idealKit.hpp                            connode.hpp
+idealKit.hpp                            divnode.hpp
 idealKit.hpp                            mulnode.hpp
 idealKit.hpp                            phaseX.hpp
 idealKit.hpp                            subnode.hpp
@@ -586,6 +601,7 @@
 
 loopTransform.cpp                       addnode.hpp
 loopTransform.cpp                       allocation.inline.hpp
+loopTransform.cpp                       callnode.hpp
 loopTransform.cpp                       connode.hpp
 loopTransform.cpp                       compileLog.hpp
 loopTransform.cpp                       divnode.hpp
@@ -641,6 +657,7 @@
 macro.cpp                               callnode.hpp
 macro.cpp                               cfgnode.hpp
 macro.cpp                               compile.hpp
+macro.cpp                               compileLog.hpp
 macro.cpp                               connode.hpp
 macro.cpp                               locknode.hpp
 macro.cpp                               loopnode.hpp
@@ -758,6 +775,7 @@
 output.cpp                              assembler.inline.hpp
 output.cpp                              callnode.hpp
 output.cpp                              cfgnode.hpp
+output.cpp                              compileBroker.hpp
 output.cpp                              debugInfo.hpp
 output.cpp                              debugInfoRec.hpp
 output.cpp                              handles.inline.hpp
@@ -993,6 +1011,21 @@
 split_if.cpp                            connode.hpp
 split_if.cpp                            loopnode.hpp
 
+stringopts.hpp                          phaseX.hpp
+stringopts.hpp                          node.hpp
+
+stringopts.cpp                          addnode.hpp
+stringopts.cpp                          callnode.hpp
+stringopts.cpp                          callGenerator.hpp
+stringopts.cpp                          compileLog.hpp
+stringopts.cpp                          divnode.hpp
+stringopts.cpp                          idealKit.hpp
+stringopts.cpp                          graphKit.hpp
+stringopts.cpp                          rootnode.hpp
+stringopts.cpp                          runtime.hpp
+stringopts.cpp                          subnode.hpp
+stringopts.cpp                          stringopts.hpp
+
 stubGenerator_<arch_model>.cpp          runtime.hpp
 
 stubRoutines.cpp                        runtime.hpp
--- a/src/share/vm/includeDB_core	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/includeDB_core	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 //
-// Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+// Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -175,6 +175,7 @@
 arguments.cpp                           management.hpp
 arguments.cpp                           oop.inline.hpp
 arguments.cpp                           os_<os_family>.inline.hpp
+arguments.cpp                           referenceProcessor.hpp
 arguments.cpp                           universe.inline.hpp
 arguments.cpp                           vm_version_<arch>.hpp
 
@@ -289,7 +290,7 @@
 attachListener.hpp                      debug.hpp
 attachListener.hpp                      ostream.hpp
 
-barrierSet.cpp				barrierSet.hpp
+barrierSet.cpp				barrierSet.inline.hpp
 barrierSet.cpp			        collectedHeap.hpp
 barrierSet.cpp				universe.hpp
 
@@ -516,6 +517,11 @@
 
 ciCallProfile.hpp                       ciClassList.hpp
 
+ciCallSite.cpp                          ciCallSite.hpp
+ciCallSite.cpp                          ciUtilities.hpp
+
+ciCallSite.hpp                          ciInstance.hpp
+
 ciConstant.cpp                          allocation.hpp
 ciConstant.cpp                          allocation.inline.hpp
 ciConstant.cpp                          ciConstant.hpp
@@ -532,6 +538,12 @@
 ciConstantPoolCache.hpp                 growableArray.hpp
 ciConstantPoolCache.hpp                 resourceArea.hpp
 
+ciCPCache.cpp                           cpCacheOop.hpp
+ciCPCache.cpp                           ciCPCache.hpp
+
+ciCPCache.hpp                           ciClassList.hpp
+ciCPCache.hpp                           ciObject.hpp
+
 ciEnv.cpp                               allocation.inline.hpp
 ciEnv.cpp                               ciConstant.hpp
 ciEnv.cpp                               ciEnv.hpp
@@ -570,6 +582,7 @@
 ciEnv.hpp                               dependencies.hpp
 ciEnv.hpp                               exceptionHandlerTable.hpp
 ciEnv.hpp                               oopMap.hpp
+ciEnv.hpp                               systemDictionary.hpp
 ciEnv.hpp                               thread.hpp
 
 ciExceptionHandler.cpp                  ciExceptionHandler.hpp
@@ -592,6 +605,7 @@
 ciField.hpp                             ciClassList.hpp
 ciField.hpp                             ciConstant.hpp
 ciField.hpp                             ciFlags.hpp
+ciField.hpp                             ciInstance.hpp
 
 ciFlags.cpp                             ciFlags.hpp
 
@@ -678,6 +692,7 @@
 ciMethod.hpp                            ciInstanceKlass.hpp
 ciMethod.hpp                            ciObject.hpp
 ciMethod.hpp                            ciSignature.hpp
+ciMethod.hpp                            methodHandles.hpp
 ciMethod.hpp                            methodLiveness.hpp
 
 ciMethodBlocks.cpp                      bytecode.hpp
@@ -709,6 +724,15 @@
 ciMethodKlass.hpp                       ciKlass.hpp
 ciMethodKlass.hpp                       ciSymbol.hpp
 
+ciMethodHandle.cpp                      ciClassList.hpp
+ciMethodHandle.cpp                      ciInstance.hpp
+ciMethodHandle.cpp                      ciMethodHandle.hpp
+ciMethodHandle.cpp                      ciUtilities.hpp
+ciMethodHandle.cpp                      methodHandles.hpp
+ciMethodHandle.cpp                      methodHandleWalk.hpp
+
+ciMethodHandle.hpp                      methodHandles.hpp
+
 ciNullObject.cpp                        ciNullObject.hpp
 
 ciNullObject.hpp                        ciClassList.hpp
@@ -754,11 +778,14 @@
 ciObject.hpp                            jniHandles.hpp
 
 ciObjectFactory.cpp                     allocation.inline.hpp
+ciObjectFactory.cpp                     ciCallSite.hpp
+ciObjectFactory.cpp                     ciCPCache.hpp
 ciObjectFactory.cpp                     ciInstance.hpp
 ciObjectFactory.cpp                     ciInstanceKlass.hpp
 ciObjectFactory.cpp                     ciInstanceKlassKlass.hpp
 ciObjectFactory.cpp                     ciMethod.hpp
 ciObjectFactory.cpp                     ciMethodData.hpp
+ciObjectFactory.cpp                     ciMethodHandle.hpp
 ciObjectFactory.cpp                     ciMethodKlass.hpp
 ciObjectFactory.cpp                     ciNullObject.hpp
 ciObjectFactory.cpp                     ciObjArray.hpp
@@ -792,6 +819,7 @@
 ciSignature.hpp                         globalDefinitions.hpp
 ciSignature.hpp                         growableArray.hpp
 
+ciStreams.cpp                           ciCallSite.hpp
 ciStreams.cpp                           ciConstant.hpp
 ciStreams.cpp                           ciField.hpp
 ciStreams.cpp                           ciStreams.hpp
@@ -894,6 +922,7 @@
 
 classLoader.cpp                         allocation.inline.hpp
 classLoader.cpp                         arguments.hpp
+classLoader.cpp                         bytecodeStream.hpp
 classLoader.cpp                         classFileParser.hpp
 classLoader.cpp                         classFileStream.hpp
 classLoader.cpp                         classLoader.hpp
@@ -921,6 +950,7 @@
 classLoader.cpp                         management.hpp
 classLoader.cpp                         oop.inline.hpp
 classLoader.cpp                         oopFactory.hpp
+classLoader.cpp                         oopMapCache.hpp
 classLoader.cpp                         os_<os_family>.inline.hpp
 classLoader.cpp                         symbolOop.hpp
 classLoader.cpp                         systemDictionary.hpp
@@ -1003,6 +1033,7 @@
 codeCache.cpp                           oop.inline.hpp
 codeCache.cpp                           pcDesc.hpp
 codeCache.cpp                           resourceArea.hpp
+codeCache.cpp                           xmlstream.hpp
 
 codeCache.hpp                           allocation.hpp
 codeCache.hpp                           codeBlob.hpp
@@ -1091,6 +1122,7 @@
 compileBroker.cpp                       oop.inline.hpp
 compileBroker.cpp                       os.hpp
 compileBroker.cpp                       sharedRuntime.hpp
+compileBroker.cpp                       sweeper.hpp
 compileBroker.cpp                       systemDictionary.hpp
 compileBroker.cpp                       vmSymbols.hpp
 
@@ -1291,6 +1323,7 @@
 cpCacheOop.cpp                          markSweep.inline.hpp
 cpCacheOop.cpp                          objArrayOop.hpp
 cpCacheOop.cpp                          oop.inline.hpp
+cpCacheOop.cpp                          rewriter.hpp
 cpCacheOop.cpp                          universe.inline.hpp
 
 cpCacheOop.hpp                          allocation.hpp
@@ -1451,6 +1484,7 @@
 deoptimization.cpp                      vframe.hpp
 deoptimization.cpp                      vframeArray.hpp
 deoptimization.cpp                      vframe_hp.hpp
+deoptimization.cpp                      vmreg_<arch>.inline.hpp
 deoptimization.cpp                      xmlstream.hpp
 
 deoptimization.hpp                      allocation.hpp
@@ -1497,6 +1531,7 @@
 disassembler.cpp                        fprofiler.hpp
 disassembler.cpp                        handles.inline.hpp
 disassembler.cpp                        hpi.hpp
+disassembler.cpp                        javaClasses.hpp
 disassembler.cpp                        stubCodeGenerator.hpp
 disassembler.cpp                        stubRoutines.hpp
 
@@ -2501,6 +2536,7 @@
 jvmtiExport.hpp                         handles.hpp
 jvmtiExport.hpp                         iterator.hpp
 jvmtiExport.hpp                         jvmti.h
+jvmtiExport.hpp                         jvmticmlr.h
 jvmtiExport.hpp                         oop.hpp
 jvmtiExport.hpp                         oopsHierarchy.hpp
 
@@ -2619,6 +2655,7 @@
 loaderConstraints.cpp                   safepoint.hpp
 
 loaderConstraints.hpp                   dictionary.hpp
+loaderConstraints.hpp                   placeholders.hpp
 loaderConstraints.hpp                   hashtable.hpp
 
 location.cpp                            debugInfo.hpp
@@ -2812,6 +2849,12 @@
 methodDataOop.hpp                       orderAccess.hpp
 methodDataOop.hpp                       universe.hpp
 
+methodHandleWalk.hpp                    methodHandles.hpp
+
+methodHandleWalk.cpp                    methodHandleWalk.hpp
+methodHandleWalk.cpp                    oopFactory.hpp
+methodHandleWalk.cpp                    rewriter.hpp
+
 methodHandles.hpp                       frame.inline.hpp
 methodHandles.hpp                       globals.hpp
 methodHandles.hpp                       interfaceSupport.hpp
@@ -3469,6 +3512,7 @@
 reflection.cpp                          javaClasses.hpp
 reflection.cpp                          jvm.h
 reflection.cpp                          linkResolver.hpp
+reflection.cpp                          methodHandleWalk.hpp
 reflection.cpp                          objArrayKlass.hpp
 reflection.cpp                          objArrayOop.hpp
 reflection.cpp                          oopFactory.hpp
@@ -3680,6 +3724,7 @@
 sharedRuntime.cpp                       abstractCompiler.hpp
 sharedRuntime.cpp                       arguments.hpp
 sharedRuntime.cpp                       biasedLocking.hpp
+sharedRuntime.cpp                       compileBroker.hpp
 sharedRuntime.cpp                       compiledIC.hpp
 sharedRuntime.cpp                       compilerOracle.hpp
 sharedRuntime.cpp                       copy.hpp
@@ -3688,6 +3733,7 @@
 sharedRuntime.cpp                       forte.hpp
 sharedRuntime.cpp                       gcLocker.inline.hpp
 sharedRuntime.cpp                       handles.inline.hpp
+sharedRuntime.cpp                       hashtable.inline.hpp
 sharedRuntime.cpp                       init.hpp
 sharedRuntime.cpp                       interfaceSupport.hpp
 sharedRuntime.cpp                       interpreterRuntime.hpp
@@ -3715,6 +3761,7 @@
 sharedRuntime.hpp                       allocation.hpp
 sharedRuntime.hpp                       bytecodeHistogram.hpp
 sharedRuntime.hpp                       bytecodeTracer.hpp
+sharedRuntime.hpp                       hashtable.hpp
 sharedRuntime.hpp                       linkResolver.hpp
 sharedRuntime.hpp                       resourceArea.hpp
 sharedRuntime.hpp                       threadLocalStorage.hpp
@@ -3932,6 +3979,7 @@
 
 sweeper.cpp                             atomic.hpp
 sweeper.cpp                             codeCache.hpp
+sweeper.cpp                             compileBroker.hpp
 sweeper.cpp                             events.hpp
 sweeper.cpp                             methodOop.hpp
 sweeper.cpp                             mutexLocker.hpp
@@ -3939,6 +3987,8 @@
 sweeper.cpp                             os.hpp
 sweeper.cpp                             resourceArea.hpp
 sweeper.cpp                             sweeper.hpp
+sweeper.cpp                             vm_operations.hpp
+sweeper.cpp                             xmlstream.hpp
 
 symbolKlass.cpp                         gcLocker.hpp
 symbolKlass.cpp                         handles.inline.hpp
@@ -4592,6 +4642,7 @@
 vm_operations.cpp                       interfaceSupport.hpp
 vm_operations.cpp                       isGCActiveMark.hpp
 vm_operations.cpp                       resourceArea.hpp
+vm_operations.cpp                       sweeper.hpp
 vm_operations.cpp                       threadService.hpp
 vm_operations.cpp                       thread_<os_family>.inline.hpp
 vm_operations.cpp                       vmSymbols.hpp
--- a/src/share/vm/includeDB_gc_parallel	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/includeDB_gc_parallel	Wed Mar 24 17:16:33 2010 -0700
@@ -21,6 +21,8 @@
 // have any questions.
 //  
 
+arguments.cpp                           compactibleFreeListSpace.hpp
+
 assembler_<arch>.cpp                    g1SATBCardTableModRefBS.hpp
 assembler_<arch>.cpp                    g1CollectedHeap.inline.hpp
 assembler_<arch>.cpp                    heapRegion.hpp
--- a/src/share/vm/interpreter/abstractInterpreter.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/interpreter/abstractInterpreter.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -109,6 +109,8 @@
 
   static void       print_method_kind(MethodKind kind)          PRODUCT_RETURN;
 
+  static bool       can_be_compiled(methodHandle m);
+
   // Runtime support
 
   // length = invoke bytecode length (to advance to next bytecode)
--- a/src/share/vm/interpreter/bytecode.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/interpreter/bytecode.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -102,7 +102,9 @@
   KlassHandle resolved_klass;
   constantPoolHandle constants(THREAD, _method->constants());
 
-  if (adjusted_invoke_code() != Bytecodes::_invokeinterface) {
+  if (adjusted_invoke_code() == Bytecodes::_invokedynamic) {
+    LinkResolver::resolve_dynamic_method(m, resolved_klass, constants, index(), CHECK_(methodHandle()));
+  } else if (adjusted_invoke_code() != Bytecodes::_invokeinterface) {
     LinkResolver::resolve_method(m, resolved_klass, constants, index(), CHECK_(methodHandle()));
   } else {
     LinkResolver::resolve_interface_method(m, resolved_klass, constants, index(), CHECK_(methodHandle()));
--- a/src/share/vm/interpreter/bytecode.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/interpreter/bytecode.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -205,12 +205,14 @@
   bool is_invokespecial() const                  { return adjusted_invoke_code() == Bytecodes::_invokespecial; }
   bool is_invokedynamic() const                  { return adjusted_invoke_code() == Bytecodes::_invokedynamic; }
 
+  bool has_receiver() const                      { return !is_invokestatic() && !is_invokedynamic(); }
   bool has_giant_index() const                   { return is_invokedynamic(); }
 
   bool is_valid() const                          { return is_invokeinterface() ||
                                                           is_invokevirtual()   ||
                                                           is_invokestatic()    ||
-                                                          is_invokespecial();     }
+                                                          is_invokespecial()   ||
+                                                          is_invokedynamic(); }
 
   // Creation
   inline friend Bytecode_invoke* Bytecode_invoke_at(methodHandle method, int bci);
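
The new has_receiver() accessor lets callers fold the receiver slot into
argument sizing uniformly; the interpreterRuntime.cpp change later in this
changeset uses it exactly this way:

  // invokestatic and invokedynamic push no receiver; everything else does:
  int size_of_arguments = asc.size() + (invoke->has_receiver() ? 1 : 0);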
--- a/src/share/vm/interpreter/bytecodeInterpreter.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -281,7 +281,7 @@
 
 #define DO_BACKEDGE_CHECKS(skip, branch_pc)                                                         \
     if ((skip) <= 0) {                                                                              \
-      if (UseCompiler && UseLoopCounter) {                                                          \
+      if (UseLoopCounter) {                                                                         \
         bool do_OSR = UseOnStackReplacement;                                                        \
         BACKEDGE_COUNT->increment();                                                                \
         if (do_OSR) do_OSR = BACKEDGE_COUNT->reached_InvocationLimit();                             \
@@ -289,16 +289,12 @@
           nmethod*  osr_nmethod;                                                                    \
           OSR_REQUEST(osr_nmethod, branch_pc);                                                      \
           if (osr_nmethod != NULL && osr_nmethod->osr_entry_bci() != InvalidOSREntryBci) {          \
-            intptr_t* buf;                                                                          \
-            CALL_VM(buf=SharedRuntime::OSR_migration_begin(THREAD), handle_exception);              \
+            intptr_t* buf = SharedRuntime::OSR_migration_begin(THREAD);                             \
             istate->set_msg(do_osr);                                                                \
             istate->set_osr_buf((address)buf);                                                      \
             istate->set_osr_entry(osr_nmethod->osr_entry());                                        \
             return;                                                                                 \
           }                                                                                         \
-        } else {                                                                                    \
-          INCR_INVOCATION_COUNT;                                                                    \
-          SAFEPOINT;                                                                                \
         }                                                                                           \
       }  /* UseCompiler ... */                                                                      \
       INCR_INVOCATION_COUNT;                                                                        \
@@ -1281,12 +1277,7 @@
           jfloat f;
           jdouble r;
           f = STACK_FLOAT(-1);
-#ifdef IA64
-          // IA64 gcc bug
-          r = ( f == 0.0f ) ? (jdouble) f : (jdouble) f + ia64_double_zero;
-#else
           r = (jdouble) f;
-#endif
           MORE_STACK(-1); // POP
           SET_STACK_DOUBLE(r, 1);
           UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
--- a/src/share/vm/interpreter/bytecodeTracer.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/interpreter/bytecodeTracer.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -270,6 +270,8 @@
     st->print_cr(" %s", constants->resolved_klass_at(i)->klass_part()->external_name());
   } else if (tag.is_unresolved_klass()) {
     st->print_cr(" <unresolved klass at %d>", i);
+  } else if (tag.is_object()) {
+    st->print_cr(" " PTR_FORMAT, constants->object_at(i));
   } else {
     st->print_cr(" bad tag=%d at %d", tag.value(), i);
   }
@@ -282,18 +284,21 @@
   constantPoolOop constants = method()->constants();
   constantTag tag = constants->tag_at(i);
 
+  int nt_index = -1;
+
   switch (tag.value()) {
   case JVM_CONSTANT_InterfaceMethodref:
   case JVM_CONSTANT_Methodref:
   case JVM_CONSTANT_Fieldref:
+  case JVM_CONSTANT_NameAndType:
     break;
   default:
     st->print_cr(" bad tag=%d at %d", tag.value(), i);
     return;
   }
 
-  symbolOop name = constants->name_ref_at(orig_i);
-  symbolOop signature = constants->signature_ref_at(orig_i);
+  symbolOop name = constants->uncached_name_ref_at(i);
+  symbolOop signature = constants->uncached_signature_ref_at(i);
   st->print_cr(" %d <%s> <%s> ", i, name->as_C_string(), signature->as_C_string());
 }
 
--- a/src/share/vm/interpreter/bytecodes.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/interpreter/bytecodes.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -357,7 +357,7 @@
   def(_invokespecial       , "invokespecial"       , "bjj"  , NULL    , T_ILLEGAL, -1, true);
   def(_invokestatic        , "invokestatic"        , "bjj"  , NULL    , T_ILLEGAL,  0, true);
   def(_invokeinterface     , "invokeinterface"     , "bjj__", NULL    , T_ILLEGAL, -1, true);
-  def(_invokedynamic       , "invokedynamic"       , "bjjjj", NULL    , T_ILLEGAL, -1, true );
+  def(_invokedynamic       , "invokedynamic"       , "bjjjj", NULL    , T_ILLEGAL,  0, true );
   def(_new                 , "new"                 , "bii"  , NULL    , T_OBJECT ,  1, true );
   def(_newarray            , "newarray"            , "bc"   , NULL    , T_OBJECT ,  0, true );
   def(_anewarray           , "anewarray"           , "bii"  , NULL    , T_OBJECT ,  0, true );
--- a/src/share/vm/interpreter/interpreter.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/interpreter/interpreter.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -314,6 +314,20 @@
       break;
     }
 
+   case Bytecodes::_invokedynamic: {
+      Thread *thread = Thread::current();
+      ResourceMark rm(thread);
+      methodHandle mh(thread, method);
+      type = Bytecode_invoke_at(mh, bci)->result_type(thread);
+      // since the cache entry might not be initialized:
+      // (NOT needed for the old calling convention)
+      if (!is_top_frame) {
+        int index = Bytes::get_native_u4(bcp+1);
+        method->constants()->cache()->secondary_entry_at(index)->set_parameter_size(callee_parameters);
+      }
+      break;
+    }
+
     case Bytecodes::_ldc   :
       type = constant_pool_type( method, *(bcp+1) );
       break;
--- a/src/share/vm/interpreter/interpreterRuntime.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -353,7 +353,7 @@
     assert(h_exception.not_null(), "NULL exceptions should be handled by athrow");
     assert(h_exception->is_oop(), "just checking");
     // Check that exception is a subclass of Throwable, otherwise we have a VerifyError
-    if (!(h_exception->is_a(SystemDictionary::throwable_klass()))) {
+    if (!(h_exception->is_a(SystemDictionary::Throwable_klass()))) {
       if (ExitVMOnVerifyError) vm_exit(-1);
       ShouldNotReachHere();
     }
@@ -397,7 +397,7 @@
 
   // notify JVMTI of an exception throw; JVMTI will detect if this is a first
   // time throw or a stack unwinding throw and accordingly notify the debugger
-  if (JvmtiExport::can_post_exceptions()) {
+  if (JvmtiExport::can_post_on_exceptions()) {
     JvmtiExport::post_exception_throw(thread, h_method(), bcp(thread), h_exception());
   }
 
@@ -426,7 +426,7 @@
   }
   // notify debugger of an exception catch
   // (this is good for exceptions caught in native methods as well)
-  if (JvmtiExport::can_post_exceptions()) {
+  if (JvmtiExport::can_post_on_exceptions()) {
     JvmtiExport::notice_unwind_due_to_exception(thread, h_method(), handler_pc, h_exception(), (handler_pc != NULL));
   }
 
@@ -585,7 +585,7 @@
   Handle exception(thread, thread->vm_result());
   assert(exception() != NULL, "vm result should be set");
   thread->set_vm_result(NULL); // clear vm result before continuing (may cause memory leaks and assert failures)
-  if (!exception->is_a(SystemDictionary::threaddeath_klass())) {
+  if (!exception->is_a(SystemDictionary::ThreadDeath_klass())) {
     exception = get_preinitialized_exception(
                        SystemDictionary::IllegalMonitorStateException_klass(),
                        CATCH);
@@ -660,7 +660,7 @@
       tty->print_cr("Resolving: klass: %s to method: %s", info.resolved_klass()->name()->as_C_string(), info.resolved_method()->name()->as_C_string());
     }
     if (info.resolved_method()->method_holder() ==
-                                            SystemDictionary::object_klass()) {
+                                            SystemDictionary::Object_klass()) {
       // NOTE: THIS IS A FIX FOR A CORNER CASE in the JVM spec
       // (see also cpCacheOop.cpp for details)
       methodHandle rm = info.resolved_method();
@@ -681,7 +681,7 @@
 IRT_END
 
 
-// First time execution:  Resolve symbols, create a permanent CallSiteImpl object.
+// First time execution:  Resolve symbols, create a permanent CallSite object.
 IRT_ENTRY(void, InterpreterRuntime::resolve_invokedynamic(JavaThread* thread)) {
   ResourceMark rm(thread);
 
@@ -708,21 +708,16 @@
   constantPoolHandle pool(thread, caller_method->constants());
   pool->set_invokedynamic();    // mark header to flag active call sites
 
-  int raw_index = four_byte_index(thread);
-  assert(constantPoolCacheOopDesc::is_secondary_index(raw_index), "invokedynamic indexes marked specially");
-
-  // there are two CPC entries that are of interest:
-  int site_index = constantPoolCacheOopDesc::decode_secondary_index(raw_index);
-  int main_index = pool->cache()->entry_at(site_index)->main_entry_index();
-  // and there is one CP entry, a NameAndType:
-  int nt_index = pool->map_instruction_operand_to_index(raw_index);
+  int site_index = four_byte_index(thread);
+  // there is a second CPC entry that is of interest; it caches signature info:
+  int main_index = pool->cache()->secondary_entry_at(site_index)->main_entry_index();
 
   // first resolve the signature to a MH.invoke methodOop
   if (!pool->cache()->entry_at(main_index)->is_resolved(bytecode)) {
     JvmtiHideSingleStepping jhss(thread);
     CallInfo info;
     LinkResolver::resolve_invoke(info, Handle(), pool,
-                                 raw_index, bytecode, CHECK);
+                                 site_index, bytecode, CHECK);
     // The main entry corresponds to a JVM_CONSTANT_NameAndType, and serves
     // as a common reference point for all invokedynamic call sites with
     // that exact call descriptor.  We will link it in the CP cache exactly
@@ -741,7 +736,7 @@
   assert(mh_invdyn.not_null() && mh_invdyn->is_method() && mh_invdyn->is_method_handle_invoke(),
          "correct result from LinkResolver::resolve_invokedynamic");
 
-  symbolHandle call_site_name(THREAD, pool->nt_name_ref_at(nt_index));
+  symbolHandle call_site_name(THREAD, pool->name_ref_at(site_index));
   Handle call_site
     = SystemDictionary::make_dynamic_call_site(caller_method->method_holder(),
                                                caller_method->method_idnum(),
@@ -753,61 +748,11 @@
   // In the secondary entry, the f1 field is the call site, and the f2 (index)
   // field is some data about the invoke site.
   int extra_data = 0;
-  pool->cache()->entry_at(site_index)->set_dynamic_call(call_site(), extra_data);
+  pool->cache()->secondary_entry_at(site_index)->set_dynamic_call(call_site(), extra_data);
 }
 IRT_END
 
 
-// Called on first time execution, and also whenever the CallSite.target is null.
-// FIXME:  Do more of this in Java code.
-IRT_ENTRY(void, InterpreterRuntime::bootstrap_invokedynamic(JavaThread* thread, oopDesc* call_site)) {
-  methodHandle   mh_invdyn(thread, (methodOop) sun_dyn_CallSiteImpl::vmmethod(call_site));
-  Handle         mh_type(thread,   mh_invdyn->method_handle_type());
-  objArrayHandle mh_ptypes(thread, java_dyn_MethodType::ptypes(mh_type()));
-
-  // squish the arguments down to a single array
-  int nargs = mh_ptypes->length();
-  objArrayHandle arg_array;
-  {
-    objArrayOop aaoop = oopFactory::new_objArray(SystemDictionary::object_klass(), nargs, CHECK);
-    arg_array = objArrayHandle(thread, aaoop);
-  }
-  frame fr = thread->last_frame();
-  assert(fr.interpreter_frame_bcp() != NULL, "sanity");
-  int tos_offset = 0;
-  for (int i = nargs; --i >= 0; ) {
-    intptr_t* slot_addr = fr.interpreter_frame_tos_at(tos_offset++);
-    oop ptype = mh_ptypes->obj_at(i);
-    oop arg = NULL;
-    if (!java_lang_Class::is_primitive(ptype)) {
-      arg = *(oop*) slot_addr;
-    } else {
-      BasicType bt = java_lang_Class::primitive_type(ptype);
-      assert(frame::interpreter_frame_expression_stack_direction() < 0, "else reconsider this code");
-      jvalue value;
-      Interpreter::get_jvalue_in_slot(slot_addr, bt, &value);
-      tos_offset += type2size[bt]-1;
-      arg = java_lang_boxing_object::create(bt, &value, CHECK);
-      // FIXME:  These boxing objects are not canonicalized under
-      // the Java autoboxing rules.  They should be...
-      // The best approach would be to push the arglist creation into Java.
-      // The JVM should use a lower-level interface to communicate argument lists.
-    }
-    arg_array->obj_at_put(i, arg);
-  }
-
-  // now find the bootstrap method
-  oop bootstrap_mh_oop = instanceKlass::cast(fr.interpreter_frame_method()->method_holder())->bootstrap_method();
-  assert(bootstrap_mh_oop != NULL, "resolve_invokedynamic ensures a BSM");
-
-  // return the bootstrap method and argument array via vm_result/_2
-  thread->set_vm_result(bootstrap_mh_oop);
-  thread->set_vm_result_2(arg_array());
-}
-IRT_END
-
-
-
 //------------------------------------------------------------------------------------------------------------------------
 // Miscellaneous
 
@@ -1305,7 +1250,7 @@
   methodHandle mh(thread, fr.interpreter_frame_method());
   Bytecode_invoke* invoke = Bytecode_invoke_at(mh, bci);
   ArgumentSizeComputer asc(invoke->signature());
-  int size_of_arguments = (asc.size() + (invoke->is_invokestatic() ? 0 : 1)); // receiver
+  int size_of_arguments = (asc.size() + (invoke->has_receiver() ? 1 : 0)); // receiver
   Copy::conjoint_bytes(src_address, dest_address,
                        size_of_arguments * Interpreter::stackElementSize());
 IRT_END
--- a/src/share/vm/interpreter/interpreterRuntime.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/interpreter/interpreterRuntime.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -91,7 +91,6 @@
   // Calls
   static void    resolve_invoke       (JavaThread* thread, Bytecodes::Code bytecode);
   static void    resolve_invokedynamic(JavaThread* thread);
-  static void  bootstrap_invokedynamic(JavaThread* thread, oopDesc* call_site);
 
   // Breakpoints
   static void _breakpoint(JavaThread* thread, methodOopDesc* method, address bcp);
--- a/src/share/vm/interpreter/linkResolver.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/interpreter/linkResolver.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -55,7 +55,7 @@
   // we should pick the vtable index from the resolved method.
   // Other than that case, there is no valid vtable index to specify.
   int vtable_index = methodOopDesc::invalid_vtable_index;
-  if (resolved_method->method_holder() == SystemDictionary::object_klass()) {
+  if (resolved_method->method_holder() == SystemDictionary::Object_klass()) {
     assert(resolved_method->vtable_index() == selected_method->vtable_index(), "sanity check");
     vtable_index = resolved_method->vtable_index();
   }
@@ -75,6 +75,8 @@
   _selected_method = selected_method;
   _vtable_index    = vtable_index;
   if (CompilationPolicy::mustBeCompiled(selected_method)) {
+    // This path is unusual, mostly used by the '-Xcomp' stress test mode.
+
     // Note: with several active threads, the mustBeCompiled may be true
     //       while canBeCompiled is false; remove assert
     // assert(CompilationPolicy::canBeCompiled(selected_method), "cannot compile");
@@ -82,6 +84,16 @@
       // don't force compilation, resolve was on behalf of compiler
       return;
     }
+    if (instanceKlass::cast(selected_method->method_holder())->is_not_initialized()) {
+      // 'is_not_initialized' means not only '!is_initialized', but also that
+      // initialization has not yet been started ('!being_initialized').
+      // Do not force compilation of methods in uninitialized classes;
+      // doing so would trip an assert later, in
+      // CompileBroker::compile_method.
+      // We sometimes use the link resolver to do reflective lookups
+      // even before classes are initialized.
+      return;
+    }
     CompileBroker::compile_method(selected_method, InvocationEntryBci,
                                   methodHandle(), 0, "mustBeCompiled", CHECK);
   }
@@ -181,7 +193,7 @@
   // We'll check for the method name first, as that's most likely
   // to be false (so we'll short-circuit out of these tests).
   if (sel_method->name() == vmSymbols::clone_name() &&
-      sel_klass() == SystemDictionary::object_klass() &&
+      sel_klass() == SystemDictionary::Object_klass() &&
       resolved_klass->oop_is_array()) {
     // We need to change "protected" to "public".
     assert(flags.is_protected(), "clone not protected?");
@@ -223,6 +235,18 @@
   resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK);
 }
 
+void LinkResolver::resolve_dynamic_method(methodHandle& resolved_method, KlassHandle& resolved_klass, constantPoolHandle pool, int index, TRAPS) {
+  // The class is java.dyn.MethodHandle
+  resolved_klass = SystemDictionaryHandles::MethodHandle_klass();
+
+  symbolHandle method_name = vmSymbolHandles::invoke_name();
+
+  symbolHandle method_signature(THREAD, pool->signature_ref_at(index));
+  KlassHandle  current_klass   (THREAD, pool->pool_holder());
+
+  resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK);
+}
+
 void LinkResolver::resolve_interface_method(methodHandle& resolved_method, KlassHandle& resolved_klass, constantPoolHandle pool, int index, TRAPS) {
 
   // resolve klass
@@ -1015,11 +1039,8 @@
 
  // This code is reached from InterpreterRuntime::resolve_invokedynamic.
 
-  assert(constantPoolCacheOopDesc::is_secondary_index(raw_index), "must be secondary index");
-  int nt_index = pool->map_instruction_operand_to_index(raw_index);
-
   // At this point, we only need the signature, and can ignore the name.
-  symbolHandle method_signature(THREAD, pool->nt_signature_ref_at(nt_index));
+  symbolHandle method_signature(THREAD, pool->signature_ref_at(raw_index));  // raw_index works directly
   symbolHandle method_name = vmSymbolHandles::invoke_name();
   KlassHandle resolved_klass = SystemDictionaryHandles::MethodHandle_klass();
 
--- a/src/share/vm/interpreter/linkResolver.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/interpreter/linkResolver.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -133,6 +133,7 @@
 
   // static resolving for all calls except interface calls
   static void resolve_method          (methodHandle& method_result, KlassHandle& klass_result, constantPoolHandle pool, int index, TRAPS);
+  static void resolve_dynamic_method  (methodHandle& resolved_method, KlassHandle& resolved_klass, constantPoolHandle pool, int index, TRAPS);
   static void resolve_interface_method(methodHandle& method_result, KlassHandle& klass_result, constantPoolHandle pool, int index, TRAPS);
 
   // runtime/static resolving for fields
--- a/src/share/vm/interpreter/rewriter.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/interpreter/rewriter.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -48,16 +48,6 @@
 }
 
 
-int Rewriter::add_extra_cp_cache_entry(int main_entry) {
-  // Hack: We put it on the map as an encoded value.
-  // The only place that consumes this is ConstantPoolCacheEntry::set_initial_state
-  int encoded = constantPoolCacheOopDesc::encode_secondary_index(main_entry);
-  int plain_secondary_index = _cp_cache_map.append(encoded);
-  return constantPoolCacheOopDesc::encode_secondary_index(plain_secondary_index);
-}
-
-
-
 // Creates a constant pool cache given a CPC map
 // This creates the constant pool cache initially in a state
 // that is unsafe for concurrent GC processing but sets it to
@@ -127,7 +117,7 @@
   assert(p[-1] == Bytecodes::_invokedynamic, "");
   int cp_index = Bytes::get_Java_u2(p);
   int cpc  = maybe_add_cp_cache_entry(cp_index);  // add lazily
-  int cpc2 = add_extra_cp_cache_entry(cpc);
+  int cpc2 = add_secondary_cp_cache_entry(cpc);
 
   // Replace the trailing four bytes with a CPC index for the dynamic
   // call site.  Unlike other CPC entries, there is one per bytecode,
@@ -137,7 +127,7 @@
   // all these entries.  That is the main reason invokedynamic
   // must have a five-byte instruction format.  (Of course, other JVM
   // implementations can use the bytes for other purposes.)
-  Bytes::put_native_u4(p, cpc2);
+  Bytes::put_native_u4(p, constantPoolCacheOopDesc::encode_secondary_index(cpc2));
   // Note: We use native_u4 format exclusively for 4-byte indexes.
 }
 
@@ -257,15 +247,22 @@
 
 void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
   ResourceMark rm(THREAD);
-  Rewriter     rw(klass, CHECK);
+  Rewriter     rw(klass, klass->constants(), klass->methods(), CHECK);
   // (That's all, folks.)
 }
 
-Rewriter::Rewriter(instanceKlassHandle klass, TRAPS)
+
+void Rewriter::rewrite(instanceKlassHandle klass, constantPoolHandle cpool, objArrayHandle methods, TRAPS) {
+  ResourceMark rm(THREAD);
+  Rewriter     rw(klass, cpool, methods, CHECK);
+  // (That's all, folks.)
+}
+
+
+Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, objArrayHandle methods, TRAPS)
   : _klass(klass),
-    // gather starting points
-    _pool(   THREAD, klass->constants()),
-    _methods(THREAD, klass->methods())
+    _pool(cpool),
+    _methods(methods)
 {
   assert(_pool->cache() == NULL, "constant pool cache must not be set yet");
 
--- a/src/share/vm/interpreter/rewriter.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/interpreter/rewriter.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -43,16 +43,21 @@
   bool has_cp_cache(int i) { return (uint)i < (uint)_cp_map.length() && _cp_map[i] >= 0; }
   int maybe_add_cp_cache_entry(int i) { return has_cp_cache(i) ? _cp_map[i] : add_cp_cache_entry(i); }
   int add_cp_cache_entry(int cp_index) {
+    assert((cp_index & _secondary_entry_tag) == 0, "bad tag");
     assert(_cp_map[cp_index] == -1, "not twice on same cp_index");
     int cache_index = _cp_cache_map.append(cp_index);
     _cp_map.at_put(cp_index, cache_index);
     assert(cp_entry_to_cp_cache(cp_index) == cache_index, "");
     return cache_index;
   }
-  int add_extra_cp_cache_entry(int main_entry);
+  int add_secondary_cp_cache_entry(int main_cpc_entry) {
+    assert(main_cpc_entry < _cp_cache_map.length(), "must be earlier CP cache entry");
+    int cache_index = _cp_cache_map.append(main_cpc_entry | _secondary_entry_tag);
+    return cache_index;
+  }
 
   // All the work goes in here:
-  Rewriter(instanceKlassHandle klass, TRAPS);
+  Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, objArrayHandle methods, TRAPS);
 
   void compute_index_maps();
   void make_constant_pool_cache(TRAPS);
@@ -65,4 +70,9 @@
  public:
   // Driver routine:
   static void rewrite(instanceKlassHandle klass, TRAPS);
+  static void rewrite(instanceKlassHandle klass, constantPoolHandle cpool, objArrayHandle methods, TRAPS);
+
+  enum {
+    _secondary_entry_tag = nth_bit(30)
+  };
 };
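A hedged illustration of how the _secondary_entry_tag above lets the rewriter distinguish plain cp_cache_map entries from secondary ones. It assumes nth_bit(n) is 1 << n; the helper names are invented for the example:

    #include <cassert>

    const int _secondary_entry_tag = 1 << 30;   // nth_bit(30)

    bool is_secondary_map_entry(int e) { return (e & _secondary_entry_tag) != 0; }
    int  main_cpc_entry_of(int e)      { return e & ~_secondary_entry_tag; }

    int main() {
      int main_cpc_entry = 5;                   // some earlier CP cache entry
      int map_entry = main_cpc_entry | _secondary_entry_tag;
      assert(is_secondary_map_entry(map_entry));
      assert(main_cpc_entry_of(map_entry) == 5);
      return 0;
    }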
--- a/src/share/vm/interpreter/templateInterpreter.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/interpreter/templateInterpreter.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -178,14 +178,12 @@
 #endif // !PRODUCT
 EntryPoint TemplateInterpreter::_return_entry[TemplateInterpreter::number_of_return_entries];
 EntryPoint TemplateInterpreter::_earlyret_entry;
-EntryPoint TemplateInterpreter::_return_unbox_entry;
 EntryPoint TemplateInterpreter::_deopt_entry [TemplateInterpreter::number_of_deopt_entries ];
 EntryPoint TemplateInterpreter::_continuation_entry;
 EntryPoint TemplateInterpreter::_safept_entry;
 
 address    TemplateInterpreter::_return_3_addrs_by_index[TemplateInterpreter::number_of_return_addrs];
 address    TemplateInterpreter::_return_5_addrs_by_index[TemplateInterpreter::number_of_return_addrs];
-address    TemplateInterpreter::_return_5_unbox_addrs_by_index[TemplateInterpreter::number_of_return_addrs];
 
 DispatchTable TemplateInterpreter::_active_table;
 DispatchTable TemplateInterpreter::_normal_table;
@@ -253,22 +251,6 @@
     }
   }
 
-  if (EnableInvokeDynamic) {
-    CodeletMark cm(_masm, "unboxing return entry points");
-    Interpreter::_return_unbox_entry =
-      EntryPoint(
-        generate_return_unbox_entry_for(btos, 5),
-        generate_return_unbox_entry_for(ctos, 5),
-        generate_return_unbox_entry_for(stos, 5),
-        generate_return_unbox_entry_for(atos, 5), // cast conversion
-        generate_return_unbox_entry_for(itos, 5),
-        generate_return_unbox_entry_for(ltos, 5),
-        generate_return_unbox_entry_for(ftos, 5),
-        generate_return_unbox_entry_for(dtos, 5),
-        Interpreter::_return_entry[5].entry(vtos) // no unboxing for void
-      );
-  }
-
   { CodeletMark cm(_masm, "earlyret entry points");
     Interpreter::_earlyret_entry =
       EntryPoint(
@@ -319,8 +301,6 @@
     int index = Interpreter::TosState_as_index(states[j]);
     Interpreter::_return_3_addrs_by_index[index] = Interpreter::return_entry(states[j], 3);
     Interpreter::_return_5_addrs_by_index[index] = Interpreter::return_entry(states[j], 5);
-    if (EnableInvokeDynamic)
-      Interpreter::_return_5_unbox_addrs_by_index[index] = Interpreter::return_unbox_entry(states[j], 5);
   }
 
   { CodeletMark cm(_masm, "continuation entry points");
@@ -485,9 +465,11 @@
 void TemplateInterpreterGenerator::set_short_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
   assert(t->is_valid(), "template must exist");
   switch (t->tos_in()) {
-    case btos: vep = __ pc(); __ pop(btos); bep = __ pc(); generate_and_dispatch(t); break;
-    case ctos: vep = __ pc(); __ pop(ctos); sep = __ pc(); generate_and_dispatch(t); break;
-    case stos: vep = __ pc(); __ pop(stos); sep = __ pc(); generate_and_dispatch(t); break;
+    case btos:
+    case ctos:
+    case stos:
+      ShouldNotReachHere();  // btos/ctos/stos should use itos.
+      break;
     case atos: vep = __ pc(); __ pop(atos); aep = __ pc(); generate_and_dispatch(t); break;
     case itos: vep = __ pc(); __ pop(itos); iep = __ pc(); generate_and_dispatch(t); break;
     case ltos: vep = __ pc(); __ pop(ltos); lep = __ pc(); generate_and_dispatch(t); break;
@@ -547,18 +529,6 @@
 }
 
 
-address TemplateInterpreter::return_unbox_entry(TosState state, int length) {
-  assert(EnableInvokeDynamic, "");
-  if (state == vtos) {
-    // no unboxing to do, actually
-    return return_entry(state, length);
-  } else {
-    assert(length == 5, "unboxing entries generated for invokedynamic only");
-    return _return_unbox_entry.entry(state);
-  }
-}
-
-
 address TemplateInterpreter::deopt_entry(TosState state, int length) {
   guarantee(0 <= length && length < Interpreter::number_of_deopt_entries, "illegal length");
   return _deopt_entry[length].entry(state);
--- a/src/share/vm/interpreter/templateInterpreter.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/interpreter/templateInterpreter.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -110,14 +110,12 @@
 #endif // !PRODUCT
   static EntryPoint _return_entry[number_of_return_entries];    // entry points to return to from a call
   static EntryPoint _earlyret_entry;                            // entry point to return early from a call
-  static EntryPoint _return_unbox_entry;                        // entry point to unbox a return value from a call
   static EntryPoint _deopt_entry[number_of_deopt_entries];      // entry points to return to from a deoptimization
   static EntryPoint _continuation_entry;
   static EntryPoint _safept_entry;
 
   static address    _return_3_addrs_by_index[number_of_return_addrs];     // for invokevirtual   return entries
   static address    _return_5_addrs_by_index[number_of_return_addrs];     // for invokeinterface return entries
-  static address    _return_5_unbox_addrs_by_index[number_of_return_addrs]; // for invokedynamic bootstrap methods
 
   static DispatchTable _active_table;                           // the active    dispatch table (used by the interpreter for dispatch)
   static DispatchTable _normal_table;                           // the normal    dispatch table (used to set the active table in normal mode)
@@ -159,12 +157,10 @@
   // Support for invokes
   static address*   return_3_addrs_by_index_table()             { return _return_3_addrs_by_index; }
   static address*   return_5_addrs_by_index_table()             { return _return_5_addrs_by_index; }
-  static address*   return_5_unbox_addrs_by_index_table()       { return _return_5_unbox_addrs_by_index; }
   static int        TosState_as_index(TosState state);          // computes index into return_3_entry_by_index table
 
   static address    return_entry  (TosState state, int length);
   static address    deopt_entry   (TosState state, int length);
-  static address    return_unbox_entry(TosState state, int length);
 
   // Safepoint support
   static void       notice_safepoints();                        // stops the thread when reaching a safepoint
--- a/src/share/vm/interpreter/templateInterpreterGenerator.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/interpreter/templateInterpreterGenerator.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -51,10 +51,7 @@
   address generate_WrongMethodType_handler();
   address generate_ArrayIndexOutOfBounds_handler(const char* name);
   address generate_continuation_for(TosState state);
-  address generate_return_entry_for(TosState state, int step, bool unbox = false);
-  address generate_return_unbox_entry_for(TosState state, int step) {
-    return generate_return_entry_for(state, step, true);
-  }
+  address generate_return_entry_for(TosState state, int step);
   address generate_earlyret_entry_for(TosState state);
   address generate_deopt_entry_for(TosState state, int step);
   address generate_safept_entry_for(TosState state, address runtime_entry);
--- a/src/share/vm/memory/barrierSet.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/memory/barrierSet.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -41,11 +41,6 @@
 
 // count is number of array elements being written
 void BarrierSet::static_write_ref_array_post(HeapWord* start, size_t count) {
-  assert(count <= (size_t)max_intx, "count too large");
-  HeapWord* end = start + objArrayOopDesc::array_size((int)count);
-#if 0
-  warning("Post:\t" INTPTR_FORMAT "[" SIZE_FORMAT "] : [" INTPTR_FORMAT","INTPTR_FORMAT")\t",
-                   start,            count,              start,          end);
-#endif
-  Universe::heap()->barrier_set()->write_ref_array_work(MemRegion(start, end));
+  // simply delegate to instance method
+  Universe::heap()->barrier_set()->write_ref_array(start, count);
 }
--- a/src/share/vm/memory/barrierSet.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/memory/barrierSet.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -121,17 +121,18 @@
   virtual void read_ref_array(MemRegion mr) = 0;
   virtual void read_prim_array(MemRegion mr) = 0;
 
+  // 'length' below is the number of array elements being written
   virtual void write_ref_array_pre(      oop* dst, int length) {}
   virtual void write_ref_array_pre(narrowOop* dst, int length) {}
-  inline void write_ref_array(MemRegion mr);
+  // 'count' below is the number of array elements being written, starting
+  // at the address "start", which need not be HeapWord-aligned
+  inline void write_ref_array(HeapWord* start, size_t count);
 
-  // Static versions, suitable for calling from generated code.
+  // Static versions, suitable for calling from generated code;
+  // count is the number of array elements being written, starting with
+  // "start", which need not be HeapWord-aligned.
   static void static_write_ref_array_pre(HeapWord* start, size_t count);
   static void static_write_ref_array_post(HeapWord* start, size_t count);
-  // Narrow oop versions of the above; count is # of array elements being written,
-  // starting with "start", which is HeapWord-aligned.
-  static void static_write_ref_array_pre_narrow(HeapWord* start, size_t count);
-  static void static_write_ref_array_post_narrow(HeapWord* start, size_t count);
 
 protected:
   virtual void write_ref_array_work(MemRegion mr) = 0;
--- a/src/share/vm/memory/barrierSet.inline.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/memory/barrierSet.inline.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -42,14 +42,34 @@
   }
 }
 
-void BarrierSet::write_ref_array(MemRegion mr) {
-  if (kind() == CardTableModRef) {
-    ((CardTableModRefBS*)this)->inline_write_ref_array(mr);
-  } else {
-    write_ref_array_work(mr);
-  }
+// count is number of array elements being written
+void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
+  assert(count <= (size_t)max_intx, "count too large");
+  HeapWord* end = (HeapWord*)((char*)start + (count*heapOopSize));
+  // In the case of compressed oops, start and end may be misaligned, so we
+  // conservatively align start downward (not strictly necessary for current
+  // uses, but good hygiene) and end upward (essential for current uses) to a
+  // HeapWord boundary; this marks all cards overlapping the write. If this
+  // ever evolves into calling a logging barrier of narrow oop granularity,
+  // like the G1 pre-barrier (mentioned merely by way of example), this
+  // interface will need to change so that it is precise and does not include
+  // narrow oop slots outside the original write interval.
+  HeapWord* aligned_start = (HeapWord*)align_size_down((uintptr_t)start, HeapWordSize);
+  HeapWord* aligned_end   = (HeapWord*)align_size_up  ((uintptr_t)end,   HeapWordSize);
+  // If compressed oops were not being used, these should already be aligned
+  assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
+         "Expected heap word alignment of start and end");
+#if 0
+  warning("Post:\t" INTPTR_FORMAT "[" SIZE_FORMAT "] : [" INTPTR_FORMAT","INTPTR_FORMAT")\t",
+                   start,            count,              aligned_start,   aligned_end);
+#endif
+  write_ref_array_work(MemRegion(aligned_start, aligned_end));
 }
 
+
 void BarrierSet::write_region(MemRegion mr) {
   if (kind() == CardTableModRef) {
     ((CardTableModRefBS*)this)->inline_write_region(mr);
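To make the alignment arithmetic in write_ref_array above concrete, here is a small worked example, assuming 8-byte HeapWords and 4-byte narrow oops; the align helpers are simplified stand-ins for align_size_down/align_size_up:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    const size_t HeapWordSize = 8;   // assumption: 64-bit heap words
    const size_t heapOopSize  = 4;   // assumption: compressed (narrow) oops

    uintptr_t align_down(uintptr_t x, size_t a) { return x & ~(uintptr_t)(a - 1); }
    uintptr_t align_up  (uintptr_t x, size_t a) { return align_down(x + a - 1, a); }

    int main() {
      uintptr_t start = 0x1004;                       // narrow-oop slot, misaligned
      size_t    count = 3;                            // array elements written
      uintptr_t end   = start + count * heapOopSize;  // 0x1010
      uintptr_t aligned_start = align_down(start, HeapWordSize);  // 0x1000
      uintptr_t aligned_end   = align_up  (end,   HeapWordSize);  // 0x1010
      assert(aligned_start <= start && end <= aligned_end);
      printf("cards dirtied over [%#lx, %#lx)\n",
             (unsigned long)aligned_start, (unsigned long)aligned_end);
      return 0;
    }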
--- a/src/share/vm/memory/cardTableModRefBS.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/memory/cardTableModRefBS.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -511,6 +511,8 @@
 }
 
 void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
+  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
+  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
   jbyte* cur  = byte_for(mr.start());
   jbyte* last = byte_after(mr.last());
   while (cur < last) {
@@ -520,6 +522,8 @@
 }
 
 void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
+  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
+  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
   for (int i = 0; i < _cur_covered_regions; i++) {
     MemRegion mri = mr.intersection(_covered[i]);
     if (!mri.is_empty()) dirty_MemRegion(mri);
--- a/src/share/vm/memory/cardTableModRefBS.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/memory/cardTableModRefBS.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -339,6 +339,16 @@
     return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
   }
 
+  void set_card_claimed(size_t card_index) {
+    jbyte val = _byte_map[card_index];
+    if (val == clean_card_val()) {
+      val = (jbyte)claimed_card_val();
+    } else {
+      val |= (jbyte)claimed_card_val();
+    }
+    _byte_map[card_index] = val;
+  }
+
   bool claim_card(size_t card_index);
 
   bool is_card_clean(size_t card_index) {
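For context on the clean/claimed card values manipulated above, a toy card table: one byte per 2^9-byte card, with a claim that either overwrites a clean byte or ORs the claimed bit into a non-clean one. All constants and names here are illustrative, not HotSpot's actual values:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    const int     card_shift  = 9;      // 512-byte cards (illustrative)
    const uint8_t clean_card  = 0xFF;
    const uint8_t claimed_bit = 0x02;

    struct ToyCardTable {
      uint8_t bytes[1 << 10];           // covers 512 KB of toy address space
      ToyCardTable() { memset(bytes, clean_card, sizeof(bytes)); }
      void set_card_claimed(size_t card_index) {
        uint8_t val = bytes[card_index];
        bytes[card_index] = (val == clean_card) ? claimed_bit
                                                : (uint8_t)(val | claimed_bit);
      }
    };

    int main() {
      ToyCardTable ct;
      ct.set_card_claimed(0x1234 >> card_shift);
      assert(ct.bytes[0x1234 >> card_shift] == claimed_bit);
      return 0;
    }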
--- a/src/share/vm/memory/classify.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/memory/classify.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -49,7 +49,7 @@
 
   Klass* k = obj->blueprint();
 
-  if (k->as_klassOop() == SystemDictionary::object_klass()) {
+  if (k->as_klassOop() == SystemDictionary::Object_klass()) {
     tty->print_cr("Found the class!");
   }
 
--- a/src/share/vm/memory/collectorPolicy.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/memory/collectorPolicy.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -55,7 +55,7 @@
 
 void CollectorPolicy::initialize_size_info() {
   // User inputs from -mx and ms are aligned
-  set_initial_heap_byte_size(Arguments::initial_heap_size());
+  set_initial_heap_byte_size(InitialHeapSize);
   if (initial_heap_byte_size() == 0) {
     set_initial_heap_byte_size(NewSize + OldSize);
   }
--- a/src/share/vm/memory/defNewGeneration.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/memory/defNewGeneration.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -609,7 +609,7 @@
 
     remove_forwarding_pointers();
     if (PrintGCDetails) {
-      gclog_or_tty->print(" (promotion failed)");
+      gclog_or_tty->print(" (promotion failed) ");
     }
     // Add to-space to the list of space to compact
     // when a promotion failure has occurred.  In that
@@ -620,6 +620,9 @@
     from()->set_next_compaction_space(to());
     gch->set_incremental_collection_will_fail();
 
+    // Inform the next generation that a promotion failure occurred.
+    _next_gen->promotion_failure_occurred();
+
     // Reset the PromotionFailureALot counters.
     NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
   }
@@ -679,6 +682,11 @@
 
 void DefNewGeneration::handle_promotion_failure(oop old) {
   preserve_mark_if_necessary(old, old->mark());
+  if (!_promotion_failed && PrintPromotionFailure) {
+    gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
+                        old->size());
+  }
+
   // forward to self
   old->forward_to(old);
   _promotion_failed = true;
--- a/src/share/vm/memory/dump.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/memory/dump.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -63,7 +63,7 @@
   void do_oop(oop* p) {
     if (p != NULL) {
       oop obj = *p;
-      if (obj->klass() == SystemDictionary::string_klass()) {
+      if (obj->klass() == SystemDictionary::String_klass()) {
 
         int hash;
         typeArrayOop value = java_lang_String::value(obj);
@@ -625,11 +625,11 @@
 
     if (obj->is_klass() || obj->is_instance()) {
       if (obj->is_klass() ||
-          obj->is_a(SystemDictionary::class_klass()) ||
-          obj->is_a(SystemDictionary::throwable_klass())) {
+          obj->is_a(SystemDictionary::Class_klass()) ||
+          obj->is_a(SystemDictionary::Throwable_klass())) {
         // Do nothing
       }
-      else if (obj->is_a(SystemDictionary::string_klass())) {
+      else if (obj->is_a(SystemDictionary::String_klass())) {
         // immutable objects.
       } else {
         // someone added an object we hadn't accounted for.
--- a/src/share/vm/memory/genCollectedHeap.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -51,6 +51,8 @@
 }
 
 jint GenCollectedHeap::initialize() {
+  CollectedHeap::pre_initialize();
+
   int i;
   _n_gens = gen_policy()->number_of_generations();
 
@@ -129,6 +131,7 @@
 
   _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
   set_barrier_set(rem_set()->bs());
+
   _gch = this;
 
   for (i = 0; i < _n_gens; i++) {
@@ -925,6 +928,8 @@
   guarantee(VerifyBeforeGC   ||
             VerifyDuringGC   ||
             VerifyBeforeExit ||
+            PrintAssembly    ||
+            tty->count() != 0 ||   // already printing
             VerifyAfterGC, "too expensive");
   #endif
   // This might be sped up with a cache of the last generation that
--- a/src/share/vm/memory/genCollectedHeap.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/memory/genCollectedHeap.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -260,6 +260,10 @@
     return true;
   }
 
+  virtual bool card_mark_must_follow_store() const {
+    return UseConcMarkSweepGC;
+  }
+
   // We don't need barriers for stores to objects in the
   // young gen and, a fortiori, for initializing stores to
   // objects therein. This applies to {DefNew,ParNew}+{Tenured,CMS}
--- a/src/share/vm/memory/generation.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/memory/generation.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -181,6 +181,12 @@
   virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
     bool younger_handles_promotion_failure) const;
 
+  // For a non-young generation, this interface can be used to inform a
+  // generation that a promotion attempt into that generation failed.
+  // Typically used to enable diagnostic output for post-mortem analysis,
+  // but other uses of the interface are not ruled out.
+  virtual void promotion_failure_occurred() { /* does nothing */ }
+
   // Return an estimate of the maximum allocation that could be performed
   // in the generation without triggering any collection or expansion
   // activity.  It is "unsafe" because no locks are taken; the result
--- a/src/share/vm/memory/heap.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/memory/heap.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -464,7 +464,7 @@
   }
 
   // Verify that freelist contains the right amount of free space
-  guarantee(len == _free_segments, "wrong freelist");
+  //  guarantee(len == _free_segments, "wrong freelist");
 
   // Verify that the number of free blocks is not out of hand.
   static int free_block_threshold = 10000;
@@ -479,5 +479,5 @@
   for(HeapBlock *h = first_block(); h != NULL; h = next_block(h)) {
     if (h->free()) count--;
   }
-  guarantee(count == 0, "missing free blocks");
+  //  guarantee(count == 0, "missing free blocks");
 }
--- a/src/share/vm/memory/iterator.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/memory/iterator.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -296,23 +296,32 @@
 // RememberKlassesChecker can be passed "false" to turn off checking.
 // It is used by CMS when CMS yields to a different collector.
 class RememberKlassesChecker: StackObj {
- bool _state;
- bool _skip;
+ bool _saved_state;
+ bool _do_check;
  public:
-  RememberKlassesChecker(bool checking_on) : _state(false), _skip(false) {
-    _skip = !(ClassUnloading && !UseConcMarkSweepGC ||
-              CMSClassUnloadingEnabled && UseConcMarkSweepGC);
-    if (_skip) {
-      return;
+  RememberKlassesChecker(bool checking_on) : _saved_state(false),
+    _do_check(true) {
+    // The ClassUnloading flag affects all collectors except CMS.
+    // CMS unloads classes if CMSClassUnloadingEnabled is true or
+    // if ExplicitGCInvokesConcurrentAndUnloadsClasses is true and
+    // the current collection is an explicit collection.  Turning
+    // on the checking in general for
+    // ExplicitGCInvokesConcurrentAndUnloadsClasses and
+    // UseConcMarkSweepGC should not lead to false positives.
+    _do_check =
+      ClassUnloading && !UseConcMarkSweepGC ||
+      CMSClassUnloadingEnabled && UseConcMarkSweepGC ||
+      ExplicitGCInvokesConcurrentAndUnloadsClasses && UseConcMarkSweepGC;
+    if (_do_check) {
+      _saved_state = OopClosure::must_remember_klasses();
+      OopClosure::set_must_remember_klasses(checking_on);
     }
-    _state = OopClosure::must_remember_klasses();
-    OopClosure::set_must_remember_klasses(checking_on);
   }
   ~RememberKlassesChecker() {
-    if (_skip) {
-      return;
+    if (_do_check) {
+      OopClosure::set_must_remember_klasses(_saved_state);
     }
-    OopClosure::set_must_remember_klasses(_state);
   }
 };
 #endif  // ASSERT
--- a/src/share/vm/memory/referenceProcessor.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/memory/referenceProcessor.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -71,7 +71,7 @@
   assert(_sentinelRef == NULL, "should be initialized precisely once");
   EXCEPTION_MARK;
   _sentinelRef = instanceKlass::cast(
-                    SystemDictionary::reference_klass())->
+                    SystemDictionary::Reference_klass())->
                       allocate_permanent_instance(THREAD);
 
   // Initialize the master soft ref clock.
@@ -299,8 +299,8 @@
 
 
 template <class T>
-static bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
-                                          AbstractRefProcTaskExecutor* task_executor) {
+bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
+                                   AbstractRefProcTaskExecutor* task_executor) {
 
   // Remember old value of pending references list
   T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
@@ -1227,13 +1227,16 @@
   BoolObjectClosure* is_alive,
   OopClosure* keep_alive,
   VoidClosure* complete_gc,
-  YieldClosure* yield) {
+  YieldClosure* yield,
+  bool should_unload_classes) {
 
   NOT_PRODUCT(verify_ok_to_handle_reflists());
 
 #ifdef ASSERT
   bool must_remember_klasses = ClassUnloading && !UseConcMarkSweepGC ||
-                               CMSClassUnloadingEnabled && UseConcMarkSweepGC;
+                               CMSClassUnloadingEnabled && UseConcMarkSweepGC ||
+                               ExplicitGCInvokesConcurrentAndUnloadsClasses &&
+                                 UseConcMarkSweepGC && should_unload_classes;
   RememberKlassesChecker mx(must_remember_klasses);
 #endif
   // Soft references
--- a/src/share/vm/memory/referenceProcessor.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/memory/referenceProcessor.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -170,11 +170,13 @@
   // The caller is responsible for taking care of potential
   // interference with concurrent operations on these lists
   // (or predicates involved) by other threads. Currently
-  // only used by the CMS collector.
+  // only used by the CMS collector.  should_unload_classes is
+  // used to aid assertion checking when classes are collected.
   void preclean_discovered_references(BoolObjectClosure* is_alive,
                                       OopClosure*        keep_alive,
                                       VoidClosure*       complete_gc,
-                                      YieldClosure*      yield);
+                                      YieldClosure*      yield,
+                                      bool               should_unload_classes);
 
   // Delete entries in the discovered lists that have
   // either a null referent or are not active. Such
@@ -261,10 +263,13 @@
     int                parallel_gc_threads = 1,
     bool               mt_processing = false,
     bool               discovered_list_needs_barrier = false);
+
   // RefDiscoveryPolicy values
-  enum {
+  enum DiscoveryPolicy {
     ReferenceBasedDiscovery = 0,
-    ReferentBasedDiscovery  = 1
+    ReferentBasedDiscovery  = 1,
+    DiscoveryPolicyMin      = ReferenceBasedDiscovery,
+    DiscoveryPolicyMax      = ReferentBasedDiscovery
   };
 
   static void init_statics();
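The new DiscoveryPolicyMin/Max enumerators make range-checking a policy value straightforward. A hedged example of that kind of argument validation (the check itself is invented for illustration, not HotSpot's actual flag-validation code):

    #include <cstdio>

    enum DiscoveryPolicy {
      ReferenceBasedDiscovery = 0,
      ReferentBasedDiscovery  = 1,
      DiscoveryPolicyMin      = ReferenceBasedDiscovery,
      DiscoveryPolicyMax      = ReferentBasedDiscovery
    };

    bool validate_policy(int v) {
      if (v < DiscoveryPolicyMin || v > DiscoveryPolicyMax) {
        fprintf(stderr, "RefDiscoveryPolicy %d out of range [%d, %d]\n",
                v, (int)DiscoveryPolicyMin, (int)DiscoveryPolicyMax);
        return false;
      }
      return true;
    }

    int main() { return validate_policy(1) && !validate_policy(7) ? 0 : 1; }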
--- a/src/share/vm/memory/sharedHeap.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/memory/sharedHeap.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -224,10 +224,6 @@
                           CodeBlobClosure* code_roots,
                           OopClosure* non_root_closure);
 
-
-  // Like CollectedHeap::collect, but assume that the caller holds the Heap_lock.
-  virtual void collect_locked(GCCause::Cause cause) = 0;
-
   // The functions below are helper functions that a subclass of
   // "SharedHeap" can use in the implementation of its virtual
   // functions.
--- a/src/share/vm/memory/space.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/memory/space.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -876,7 +876,7 @@
     instanceOop obj = (instanceOop) allocate(size);
     obj->set_mark(markOopDesc::prototype());
     obj->set_klass_gap(0);
-    obj->set_klass(SystemDictionary::object_klass());
+    obj->set_klass(SystemDictionary::Object_klass());
   }
 }
 
--- a/src/share/vm/memory/threadLocalAllocBuffer.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/memory/threadLocalAllocBuffer.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -100,7 +100,7 @@
 void ThreadLocalAllocBuffer::make_parsable(bool retire) {
   if (end() != NULL) {
     invariants();
-    CollectedHeap::fill_with_object(top(), hard_end());
+    CollectedHeap::fill_with_object(top(), hard_end(), retire);
 
     if (retire || ZeroTLAB) {  // "Reset" the TLAB
       set_start(NULL);
--- a/src/share/vm/memory/threadLocalAllocBuffer.inline.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/memory/threadLocalAllocBuffer.inline.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,8 +27,13 @@
   HeapWord* obj = top();
   if (pointer_delta(end(), obj) >= size) {
     // successful thread-local allocation
-
-    DEBUG_ONLY(Copy::fill_to_words(obj, size, badHeapWordVal));
+#ifdef ASSERT
+    // Skip mangling the space corresponding to the object header to
+    // ensure that the returned space is not considered parsable by
+    // any concurrent GC thread.
+    size_t hdr_size = CollectedHeap::min_fill_size();
+    Copy::fill_to_words(obj + hdr_size, size - hdr_size, badHeapWordVal);
+#endif // ASSERT
     // This addition is safe because we know that top is
     // at least size below end, so the add can't wrap.
     set_top(obj + size);
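A hedged illustration of the debug-fill change above: mangle only the body of the newly allocated space, leaving the header words untouched so that a concurrent GC thread never treats the mangled pattern as a parsable object header. Constants and names are stand-ins:

    #include <cstddef>
    #include <cstdint>

    typedef uintptr_t HeapWord;
    const HeapWord badHeapWordVal = 0xBAADBABE;  // stand-in mangle pattern

    static void fill_to_words(HeapWord* p, size_t count, HeapWord value) {
      for (size_t i = 0; i < count; ++i) p[i] = value;
    }

    // hdr_size plays the role of CollectedHeap::min_fill_size() in the patch.
    void mangle_new_allocation(HeapWord* obj, size_t size, size_t hdr_size) {
      fill_to_words(obj + hdr_size, size - hdr_size, badHeapWordVal);
    }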
--- a/src/share/vm/memory/universe.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/memory/universe.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -67,6 +67,8 @@
 objArrayOop Universe::_the_empty_system_obj_array     = NULL;
 objArrayOop Universe::_the_empty_class_klass_array    = NULL;
 objArrayOop Universe::_the_array_interfaces_array     = NULL;
+oop Universe::_the_null_string                        = NULL;
+oop Universe::_the_min_jint_string                    = NULL;
 LatestMethodOopCache* Universe::_finalizer_register_cache = NULL;
 LatestMethodOopCache* Universe::_loader_addClass_cache    = NULL;
 ActiveMethodOopsCache* Universe::_reflect_invoke_cache    = NULL;
@@ -187,6 +189,8 @@
   f->do_oop((oop*)&_the_empty_system_obj_array);
   f->do_oop((oop*)&_the_empty_class_klass_array);
   f->do_oop((oop*)&_the_array_interfaces_array);
+  f->do_oop((oop*)&_the_null_string);
+  f->do_oop((oop*)&_the_min_jint_string);
   _finalizer_register_cache->oops_do(f);
   _loader_addClass_cache->oops_do(f);
   _reflect_invoke_cache->oops_do(f);
@@ -287,14 +291,17 @@
 
     SystemDictionary::initialize(CHECK);
 
-    klassOop ok = SystemDictionary::object_klass();
+    klassOop ok = SystemDictionary::Object_klass();
+
+    _the_null_string            = StringTable::intern("null", CHECK);
+    _the_min_jint_string        = StringTable::intern("-2147483648", CHECK);
 
     if (UseSharedSpaces) {
       // Verify shared interfaces array.
       assert(_the_array_interfaces_array->obj_at(0) ==
-             SystemDictionary::cloneable_klass(), "u3");
+             SystemDictionary::Cloneable_klass(), "u3");
       assert(_the_array_interfaces_array->obj_at(1) ==
-             SystemDictionary::serializable_klass(), "u3");
+             SystemDictionary::Serializable_klass(), "u3");
 
       // Verify element klass for system obj array klass
       assert(objArrayKlass::cast(_systemObjArrayKlassObj)->element_klass() == ok, "u1");
@@ -313,8 +320,8 @@
       assert(Klass::cast(systemObjArrayKlassObj())->super() == ok, "u3");
     } else {
       // Set up shared interfaces array.  (Do this before supers are set up.)
-      _the_array_interfaces_array->obj_at_put(0, SystemDictionary::cloneable_klass());
-      _the_array_interfaces_array->obj_at_put(1, SystemDictionary::serializable_klass());
+      _the_array_interfaces_array->obj_at_put(0, SystemDictionary::Cloneable_klass());
+      _the_array_interfaces_array->obj_at_put(1, SystemDictionary::Serializable_klass());
 
       // Set element klass for system obj array klass
       objArrayKlass::cast(_systemObjArrayKlassObj)->set_element_klass(ok);
@@ -358,7 +365,7 @@
   // Initialize _objectArrayKlass after core bootstraping to make
   // sure the super class is set up properly for _objectArrayKlass.
   _objectArrayKlassObj = instanceKlass::
-    cast(SystemDictionary::object_klass())->array_klass(1, CHECK);
+    cast(SystemDictionary::Object_klass())->array_klass(1, CHECK);
   // Add the class to the class hierarchy manually to make sure that
   // its vtable is initialized after core bootstrapping is completed.
   Klass::cast(_objectArrayKlassObj)->append_to_sibling_list();
@@ -419,11 +426,11 @@
     while (i < size) {
       if (!UseConcMarkSweepGC) {
         // Allocate dummy in old generation
-        oop dummy = instanceKlass::cast(SystemDictionary::object_klass())->allocate_instance(CHECK);
+        oop dummy = instanceKlass::cast(SystemDictionary::Object_klass())->allocate_instance(CHECK);
         dummy_array->obj_at_put(i++, dummy);
       }
       // Allocate dummy in permanent generation
-      oop dummy = instanceKlass::cast(SystemDictionary::object_klass())->allocate_permanent_instance(CHECK);
+      oop dummy = instanceKlass::cast(SystemDictionary::Object_klass())->allocate_permanent_instance(CHECK);
       dummy_array->obj_at_put(i++, dummy);
     }
     {
@@ -533,7 +540,7 @@
   // but we cannot do that for classes created before java.lang.Class is loaded. Here we simply
   // walk over permanent objects created so far (mostly classes) and fixup their mirrors. Note
   // that the number of objects allocated at this point is very small.
-  assert(SystemDictionary::class_klass_loaded(), "java.lang.Class should be loaded");
+  assert(SystemDictionary::Class_klass_loaded(), "java.lang.Class should be loaded");
   FixupMirrorClosure blk;
   Universe::heap()->permanent_object_iterate(&blk);
 }
@@ -549,7 +556,7 @@
   if (TraceReferenceGC) tty->print_cr("Callback to run finalizers on exit");
   {
     PRESERVE_EXCEPTION_MARK;
-    KlassHandle finalizer_klass(THREAD, SystemDictionary::finalizer_klass());
+    KlassHandle finalizer_klass(THREAD, SystemDictionary::Finalizer_klass());
     JavaValue result(T_VOID);
     JavaCalls::call_static(
       &result,
@@ -744,22 +751,22 @@
 static const uint64_t OopEncodingHeapMax = NarrowOopHeapMax << LogMinObjAlignmentInBytes;
 
 char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
+  size_t base = 0;
 #ifdef _LP64
   if (UseCompressedOops) {
     assert(mode == UnscaledNarrowOop  ||
            mode == ZeroBasedNarrowOop ||
            mode == HeapBasedNarrowOop, "mode is invalid");
+    const size_t total_size = heap_size + HeapBaseMinAddress;
     // Return specified base for the first request.
     if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
-      return (char*)HeapBaseMinAddress;
-    }
-    const size_t total_size = heap_size + HeapBaseMinAddress;
-    if (total_size <= OopEncodingHeapMax && (mode != HeapBasedNarrowOop)) {
+      base = HeapBaseMinAddress;
+    } else if (total_size <= OopEncodingHeapMax && (mode != HeapBasedNarrowOop)) {
       if (total_size <= NarrowOopHeapMax && (mode == UnscaledNarrowOop) &&
           (Universe::narrow_oop_shift() == 0)) {
         // Use 32-bits oops without encoding and
         // place heap's top on the 4Gb boundary
-        return (char*)(NarrowOopHeapMax - heap_size);
+        base = (NarrowOopHeapMax - heap_size);
       } else {
         // Can't reserve with NarrowOopShift == 0
         Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
@@ -768,16 +775,38 @@
           // Use zero based compressed oops with encoding and
           // place heap's top on the 32Gb boundary in case
           // total_size > 4Gb or failed to reserve below 4Gb.
-          return (char*)(OopEncodingHeapMax - heap_size);
+          base = (OopEncodingHeapMax - heap_size);
         }
       }
     } else {
       // Can't reserve below 32Gb.
       Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
     }
+    // Set narrow_oop_base and narrow_oop_use_implicit_null_checks
+    // used in ReservedHeapSpace() constructors.
+    // The final values will be set in initialize_heap() below.
+    if (base != 0 && (base + heap_size) <= OopEncodingHeapMax) {
+      // Use zero based compressed oops
+      Universe::set_narrow_oop_base(NULL);
+      // Don't need guard page for implicit checks in indexed
+      // addressing mode with zero based Compressed Oops.
+      Universe::set_narrow_oop_use_implicit_null_checks(true);
+    } else {
+      // Set to a non-NULL value so the ReservedSpace ctor computes
+      // the correct no-access prefix.
+      // The final value will be set in initialize_heap() below.
+      Universe::set_narrow_oop_base((address)NarrowOopHeapMax);
+#ifdef _WIN64
+      if (UseLargePages) {
+        // Cannot allocate guard pages for implicit checks in indexed
+        // addressing mode when large pages are specified on windows.
+        Universe::set_narrow_oop_use_implicit_null_checks(false);
+      }
+#endif //  _WIN64
+    }
   }
 #endif
-  return NULL; // also return NULL (don't care) for 32-bit VM
+  return (char*)base; // on a 32-bit VM, base is 0, so this returns NULL (don't care)
 }
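The restructured logic above picks a heap base by narrow-oop mode. A simplified sketch of that policy, with stand-in constants (4 GB limit for unscaled oops; 32 GB for zero-based encoding with a 3-bit shift); the real code also consults HeapBaseMinAddress defaults, the oop shift, and platform quirks:

    #include <cstdint>
    #include <cstdio>

    const uint64_t NarrowOopHeapMax   = UINT64_C(1) << 32;  // 4 GB
    const uint64_t OopEncodingHeapMax = UINT64_C(1) << 35;  // 32 GB (shift == 3)

    uint64_t preferred_base(uint64_t heap_size, uint64_t min_address) {
      const uint64_t total_size = heap_size + min_address;
      if (total_size <= NarrowOopHeapMax) {
        return NarrowOopHeapMax - heap_size;    // unscaled: heap top at 4 GB
      } else if (total_size <= OopEncodingHeapMax) {
        return OopEncodingHeapMax - heap_size;  // zero-based: heap top at 32 GB
      }
      return 0;                                 // heap-based: base chosen later
    }

    int main() {
      uint64_t base = preferred_base(UINT64_C(2) << 30, UINT64_C(16) << 20);
      printf("preferred base: %#llx\n", (unsigned long long)base);
      return 0;
    }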
 
 jint Universe::initialize_heap() {
@@ -921,7 +950,7 @@
   { ResourceMark rm;
     Interpreter::initialize();      // needed for interpreter entry points
     if (!UseSharedSpaces) {
-      KlassHandle ok_h(THREAD, SystemDictionary::object_klass());
+      KlassHandle ok_h(THREAD, SystemDictionary::Object_klass());
       Universe::reinitialize_vtable_of(ok_h, CHECK_false);
       Universe::reinitialize_itables(CHECK_false);
     }
@@ -931,7 +960,7 @@
   instanceKlassHandle k_h;
   if (!UseSharedSpaces) {
     // Setup preallocated empty java.lang.Class array
-    Universe::_the_empty_class_klass_array = oopFactory::new_objArray(SystemDictionary::class_klass(), 0, CHECK_false);
+    Universe::_the_empty_class_klass_array = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_false);
     // Setup preallocated OutOfMemoryError errors
     k = SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_OutOfMemoryError(), true, CHECK_false);
     k_h = instanceKlassHandle(THREAD, k);
@@ -998,8 +1027,8 @@
   // Setup static method for registering finalizers
   // The finalizer klass must be linked before looking up the method, in
   // case it needs to get rewritten.
-  instanceKlass::cast(SystemDictionary::finalizer_klass())->link_class(CHECK_false);
-  methodOop m = instanceKlass::cast(SystemDictionary::finalizer_klass())->find_method(
+  instanceKlass::cast(SystemDictionary::Finalizer_klass())->link_class(CHECK_false);
+  methodOop m = instanceKlass::cast(SystemDictionary::Finalizer_klass())->find_method(
                                   vmSymbols::register_method_name(),
                                   vmSymbols::register_method_signature());
   if (m == NULL || !m->is_static()) {
@@ -1007,7 +1036,7 @@
       "java.lang.ref.Finalizer.register", false);
   }
   Universe::_finalizer_register_cache->init(
-    SystemDictionary::finalizer_klass(), m, CHECK_false);
+    SystemDictionary::Finalizer_klass(), m, CHECK_false);
 
   // Resolve on first use and initialize class.
   // Note: No race-condition here, since a resolve will always return the same result
@@ -1024,14 +1053,14 @@
   Universe::_reflect_invoke_cache->init(k_h(), m, CHECK_false);
 
   // Setup method for registering loaded classes in class loader vector
-  instanceKlass::cast(SystemDictionary::classloader_klass())->link_class(CHECK_false);
-  m = instanceKlass::cast(SystemDictionary::classloader_klass())->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature());
+  instanceKlass::cast(SystemDictionary::ClassLoader_klass())->link_class(CHECK_false);
+  m = instanceKlass::cast(SystemDictionary::ClassLoader_klass())->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature());
   if (m == NULL || m->is_static()) {
     THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
       "java.lang.ClassLoader.addClass", false);
   }
   Universe::_loader_addClass_cache->init(
-    SystemDictionary::classloader_klass(), m, CHECK_false);
+    SystemDictionary::ClassLoader_klass(), m, CHECK_false);
 
   // The following initializes converter functions for serialization in
   // JVM.cpp. If we clean up the StrictMath code above we may want to find
--- a/src/share/vm/memory/universe.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/memory/universe.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -169,6 +169,8 @@
   static objArrayOop  _the_empty_system_obj_array;    // Canonicalized system obj array
   static objArrayOop  _the_empty_class_klass_array;   // Canonicalized obj array of type java.lang.Class
   static objArrayOop  _the_array_interfaces_array;    // Canonicalized 2-array of cloneable & serializable klasses
+  static oop          _the_null_string;               // A cache of "null" as a Java string
+  static oop          _the_min_jint_string;           // A cache of "-2147483648" as a Java string
   static LatestMethodOopCache* _finalizer_register_cache; // static method for registering finalizable objects
   static LatestMethodOopCache* _loader_addClass_cache;    // method for registering loaded classes in class loader vector
   static ActiveMethodOopsCache* _reflect_invoke_cache;    // method for security checks
@@ -310,6 +312,8 @@
   static objArrayOop  the_empty_system_obj_array ()   { return _the_empty_system_obj_array;    }
   static objArrayOop  the_empty_class_klass_array ()  { return _the_empty_class_klass_array;   }
   static objArrayOop  the_array_interfaces_array()    { return _the_array_interfaces_array;    }
+  static oop          the_null_string()               { return _the_null_string;               }
+  static oop          the_min_jint_string()           { return _the_min_jint_string;           }
   static methodOop    finalizer_register_method()     { return _finalizer_register_cache->get_methodOop(); }
   static methodOop    loader_addClass_method()        { return _loader_addClass_cache->get_methodOop(); }
   static ActiveMethodOopsCache* reflect_invoke_cache() { return _reflect_invoke_cache; }
--- a/src/share/vm/oops/arrayKlass.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/arrayKlass.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -43,7 +43,7 @@
   if (super() == NULL)  return NULL;  // bootstrap case
   // Array klasses have primary supertypes which are not reported to Java.
   // Example super chain:  String[][] -> Object[][] -> Object[] -> Object
-  return SystemDictionary::object_klass();
+  return SystemDictionary::Object_klass();
 }
 
 
@@ -82,7 +82,7 @@
   k = arrayKlassHandle(THREAD, base_klass());
 
   assert(!k()->is_parsable(), "not expecting parsability yet.");
-  k->set_super(Universe::is_bootstrapping() ? (klassOop)NULL : SystemDictionary::object_klass());
+  k->set_super(Universe::is_bootstrapping() ? (klassOop)NULL : SystemDictionary::Object_klass());
   k->set_layout_helper(Klass::_lh_neutral_value);
   k->set_dimension(1);
   k->set_higher_dimension(NULL);
@@ -117,9 +117,9 @@
 
 bool arrayKlass::compute_is_subtype_of(klassOop k) {
   // An array is a subtype of Serializable, Clonable, and Object
-  return    k == SystemDictionary::object_klass()
-         || k == SystemDictionary::cloneable_klass()
-         || k == SystemDictionary::serializable_klass();
+  return    k == SystemDictionary::Object_klass()
+         || k == SystemDictionary::Cloneable_klass()
+         || k == SystemDictionary::Serializable_klass();
 }
 
 
--- a/src/share/vm/oops/arrayKlass.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/arrayKlass.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -67,7 +67,7 @@
   // Compiler/Interpreter offset
   static ByteSize component_mirror_offset() { return byte_offset_of(arrayKlass, _component_mirror); }
 
-  virtual klassOop java_super() const;//{ return SystemDictionary::object_klass(); }
+  virtual klassOop java_super() const;//{ return SystemDictionary::Object_klass(); }
 
   // Allocation
   // Sizes points to the first dimension of the array, subsequent dimensions
--- a/src/share/vm/oops/arrayKlassKlass.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/arrayKlassKlass.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -159,7 +159,7 @@
   assert(obj->is_klass(), "must be klass");
   klassKlass::oop_print_on(obj, st);
 }
-
+#endif //PRODUCT
 
 void arrayKlassKlass::oop_print_value_on(oop obj, outputStream* st) {
   assert(obj->is_klass(), "must be klass");
@@ -168,7 +168,6 @@
     st->print("[]");
   }
 }
-#endif
 
 
 const char* arrayKlassKlass::internal_name() const {
--- a/src/share/vm/oops/arrayKlassKlass.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/arrayKlassKlass.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -55,14 +55,13 @@
   int oop_oop_iterate(oop obj, OopClosure* blk);
   int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
 
-#ifndef PRODUCT
  public:
   // Printing
+  void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
   void oop_print_on(oop obj, outputStream* st);
-  void oop_print_value_on(oop obj, outputStream* st);
-#endif
+#endif //PRODUCT
 
- public:
   // Verification
   const char* internal_name() const;
   void oop_verify_on(oop obj, outputStream* st);
--- a/src/share/vm/oops/compiledICHolderKlass.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/compiledICHolderKlass.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -166,12 +166,12 @@
   st->print(" - klass:  "); c->holder_klass()->print_value_on(st); st->cr();
 }
 
+#endif //PRODUCT
 
 void compiledICHolderKlass::oop_print_value_on(oop obj, outputStream* st) {
   assert(obj->is_compiledICHolder(), "must be compiledICHolder");
   Klass::oop_print_value_on(obj, st);
 }
-#endif
 
 const char* compiledICHolderKlass::internal_name() const {
   return "{compiledICHolder}";
--- a/src/share/vm/oops/compiledICHolderKlass.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/compiledICHolderKlass.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -68,14 +68,13 @@
   int  oop_oop_iterate(oop obj, OopClosure* blk);
   int  oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
 
-#ifndef PRODUCT
  public:
   // Printing
+  void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
   void oop_print_on      (oop obj, outputStream* st);
-  void oop_print_value_on(oop obj, outputStream* st);
-#endif
+#endif //PRODUCT
 
- public:
   // Verification
   const char* internal_name() const;
   void oop_verify_on(oop obj, outputStream* st);
--- a/src/share/vm/oops/constMethodKlass.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/constMethodKlass.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -216,6 +216,7 @@
   }
 }
 
+#endif //PRODUCT
 
 // Short version of printing constMethodOop - just print the name of the
 // method it belongs to.
@@ -226,8 +227,6 @@
   m->method()->print_value_on(st);
 }
 
-#endif // PRODUCT
-
 const char* constMethodKlass::internal_name() const {
   return "{constMethod}";
 }
--- a/src/share/vm/oops/constMethodKlass.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/constMethodKlass.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -77,15 +77,13 @@
   int oop_oop_iterate(oop obj, OopClosure* blk);
   int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
 
-#ifndef PRODUCT
  public:
   // Printing
+  void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
   void oop_print_on      (oop obj, outputStream* st);
-  void oop_print_value_on(oop obj, outputStream* st);
+#endif //PRODUCT
 
-#endif
-
- public:
   // Verify operations
   const char* internal_name() const;
   void oop_verify_on(oop obj, outputStream* st);
--- a/src/share/vm/oops/constMethodOop.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/constMethodOop.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -258,6 +258,11 @@
   LocalVariableTableElement* localvariable_table_start() const;
 
   // byte codes
+  void    set_code(address code) {
+    if (code_size() > 0) {
+      memcpy(code_base(), code, code_size());
+    }
+  }
   address code_base() const            { return (address) (this+1); }
   address code_end() const             { return code_base() + code_size(); }
   bool    contains(address bcp) const  { return code_base() <= bcp
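The new constMethodOopDesc::set_code has a small, self-contained contract:
copy the caller's bytecodes over the method's existing code array, and do
nothing for a method with no bytecodes. A hedged standalone rendition
(TinyMethod and u1 are illustrative stand-ins, not HotSpot types):

    #include <cstring>
    #include <cstdint>

    typedef uint8_t u1;

    struct TinyMethod {
      int code_size;
      u1  code[16];                     // stands in for the trailing code array
      u1* code_base() { return code; }
      void set_code(const u1* new_code) {
        if (code_size > 0) {            // guard: code_base() is meaningless when empty
          std::memcpy(code_base(), new_code, code_size);
        }
      }
    };

    int main() {
      TinyMethod m = { 3, { 0 } };
      const u1 bytecodes[3] = { 0x2a, 0xb7, 0xb1 };  // aload_0, invokespecial, return
      m.set_code(bytecodes);
      return m.code[0] == 0x2a ? 0 : 1;
    }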
--- a/src/share/vm/oops/constantPoolKlass.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/constantPoolKlass.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -387,8 +387,18 @@
   cp->set_cache(cache());
 }
 
+#endif
 
-#endif
+void constantPoolKlass::oop_print_value_on(oop obj, outputStream* st) {
+  assert(obj->is_constantPool(), "must be constantPool");
+  constantPoolOop cp = constantPoolOop(obj);
+  st->print("constant pool [%d]", cp->length());
+  if (cp->has_pseudo_string()) st->print("/pseudo_string");
+  if (cp->has_invokedynamic()) st->print("/invokedynamic");
+  cp->print_address_on(st);
+  st->print(" for ");
+  cp->pool_holder()->print_value_on(st);
+}
 
 const char* constantPoolKlass::internal_name() const {
   return "{constant pool}";
--- a/src/share/vm/oops/constantPoolKlass.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/constantPoolKlass.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -65,9 +65,10 @@
   juint alloc_size() const              { return _alloc_size; }
   void set_alloc_size(juint n)          { _alloc_size = n; }
 
-#ifndef PRODUCT
  public:
   // Printing
+  void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
   void oop_print_on(oop obj, outputStream* st);
 #endif
 
--- a/src/share/vm/oops/constantPoolOop.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/constantPoolOop.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -110,7 +110,7 @@
         }
 
         if (!PENDING_EXCEPTION->
-              is_a(SystemDictionary::linkageError_klass())) {
+              is_a(SystemDictionary::LinkageError_klass())) {
           // Just throw the exception and don't prevent these classes from
           // being loaded due to virtual machine errors like StackOverflow
           // and OutOfMemoryError, etc, or if the thread was hit by stop()
@@ -262,25 +262,48 @@
 
 
 int constantPoolOopDesc::impl_name_and_type_ref_index_at(int which, bool uncached) {
-  jint ref_index = field_or_method_at(which, uncached);
+  int i = which;
+  if (!uncached && cache() != NULL) {
+    if (constantPoolCacheOopDesc::is_secondary_index(which))
+      // Invokedynamic indexes are always processed in native order
+      // so there is no question of reading a native u2 in Java order here.
+      return cache()->main_entry_at(which)->constant_pool_index();
+    // change byte-ordering and go via cache
+    i = remap_instruction_operand_from_cache(which);
+  } else {
+    if (tag_at(which).is_name_and_type())
+      // invokedynamic index is a simple name-and-type
+      return which;
+  }
+  assert(tag_at(i).is_field_or_method(), "Corrupted constant pool");
+  jint ref_index = *int_at_addr(i);
   return extract_high_short_from_int(ref_index);
 }
 
 
 int constantPoolOopDesc::impl_klass_ref_index_at(int which, bool uncached) {
-  jint ref_index = field_or_method_at(which, uncached);
+  guarantee(!constantPoolCacheOopDesc::is_secondary_index(which),
+            "an invokedynamic instruction does not have a klass");
+  int i = which;
+  if (!uncached && cache() != NULL) {
+    // change byte-ordering and go via cache
+    i = remap_instruction_operand_from_cache(which);
+  }
+  assert(tag_at(i).is_field_or_method(), "Corrupted constant pool");
+  jint ref_index = *int_at_addr(i);
   return extract_low_short_from_int(ref_index);
 }
 
 
 
-int constantPoolOopDesc::map_instruction_operand_to_index(int operand) {
-  if (constantPoolCacheOopDesc::is_secondary_index(operand)) {
-    return cache()->main_entry_at(operand)->constant_pool_index();
-  }
+int constantPoolOopDesc::remap_instruction_operand_from_cache(int operand) {
+  // Operand was fetched by a stream using get_Java_u2, yet was stored
+  // by Rewriter::rewrite_member_reference in native order.
+  // So now we have to fix the damage by swapping back to native order.
   assert((int)(u2)operand == operand, "clean u2");
-  int index = Bytes::swap_u2(operand);
-  return cache()->entry_at(index)->constant_pool_index();
+  int cpc_index = Bytes::swap_u2(operand);
+  int member_index = cache()->entry_at(cpc_index)->constant_pool_index();
+  return member_index;
 }
 
 
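The renamed helper isolates one invariant: Rewriter::rewrite_member_reference
stores cache indices in native byte order, but the bytecode stream fetches
them with get_Java_u2, so the operand must be swapped back before indexing
the cache. A self-contained sketch; swap_u2 and the index array stand in for
Bytes::swap_u2 and cache()->entry_at(i)->constant_pool_index():

    #include <cassert>
    #include <cstdint>

    static inline uint16_t swap_u2(uint16_t x) {
      return (uint16_t)((x << 8) | (x >> 8));
    }

    int remap_operand(uint16_t operand_as_read, const int* cpc_to_cp, int cache_len) {
      // Undo the Java-order read to recover the native-order cache index...
      int cpc_index = swap_u2(operand_as_read);
      assert(cpc_index < cache_len && "index within cache");
      // ...then map the cache entry back to its constant pool index.
      return cpc_to_cp[cpc_index];
    }

    int main() {
      const int cpc_to_cp[4] = { 0, 17, 23, 42 };
      uint16_t operand = swap_u2(2);   // entry 2, as a Java-order read would see it
      return remap_operand(operand, cpc_to_cp, 4) == 23 ? 0 : 1;
    }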
--- a/src/share/vm/oops/constantPoolOop.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/constantPoolOop.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -191,6 +191,16 @@
     }
   }
 
+  void object_at_put(int which, oop str) {
+    oop_store((volatile oop*) obj_at_addr(which), str);
+    release_tag_at_put(which, JVM_CONSTANT_Object);
+    if (UseConcMarkSweepGC) {
+      // In case the earlier card-mark was consumed by a concurrent
+      // marking thread before the tag was updated, redirty the card.
+      oop_store_without_check((volatile oop*) obj_at_addr(which), str);
+    }
+  }
+
   // For temporary use while constructing constant pool
   void string_index_at_put(int which, int string_index) {
     tag_at_put(which, JVM_CONSTANT_StringIndex);
@@ -228,7 +238,8 @@
       tag.is_unresolved_klass() ||
       tag.is_symbol() ||
       tag.is_unresolved_string() ||
-      tag.is_string();
+      tag.is_string() ||
+      tag.is_object();
   }
 
   // Fetching constants
@@ -291,6 +302,11 @@
     return string_at_impl(h_this, which, CHECK_NULL);
   }
 
+  oop object_at(int which) {
+    assert(tag_at(which).is_object(), "Corrupted constant pool");
+    return *obj_at_addr(which);
+  }
+
   // A "pseudo-string" is an non-string oop that has found is way into
   // a String entry.
   // Under AnonymousClasses this can happen if the user patches a live
@@ -342,12 +358,14 @@
   }
 
   // The following methods (name/signature/klass_ref_at, klass_ref_at_noresolve,
-  // name_and_type_ref_index_at) all expect constant pool indices
-  // from the bytecodes to be passed in, which are actually potentially byte-swapped
-  // or rewritten constant pool cache indices.  They all call map_instruction_operand_to_index.
-  int map_instruction_operand_to_index(int operand);
+  // name_and_type_ref_index_at) all expect to be passed indices obtained
+  // directly from the bytecode, and extracted according to Java byte order.
+  // If the indices are meant to refer to fields or methods, they are
+  // actually potentially byte-swapped, rewritten constant pool cache indices.
+  // The routine remap_instruction_operand_from_cache manages the adjustment
+  // of these values back to constant pool indices.
 
-  // There are also "uncached" versions which do not map the operand index; see below.
+  // There are also "uncached" versions which do not adjust the operand index; see below.
 
   // Lookup for entries consisting of (klass_index, name_and_type index)
   klassOop klass_ref_at(int which, TRAPS);
@@ -361,8 +379,6 @@
   // Lookup for entries consisting of (name_index, signature_index)
   int name_ref_index_at(int which_nt);            // ==  low-order jshort of name_and_type_at(which_nt)
   int signature_ref_index_at(int which_nt);       // == high-order jshort of name_and_type_at(which_nt)
-  symbolOop nt_name_ref_at(int which_nt)          { return symbol_at(name_ref_index_at(which_nt)); }
-  symbolOop nt_signature_ref_at(int which_nt)     { return symbol_at(signature_ref_index_at(which_nt)); }
 
   BasicType basic_type_for_signature_at(int which);
 
@@ -425,18 +441,7 @@
   int       impl_klass_ref_index_at(int which, bool uncached);
   int       impl_name_and_type_ref_index_at(int which, bool uncached);
 
-  // Takes either a constant pool cache index in possibly byte-swapped
-  // byte order (which comes from the bytecodes after rewriting) or,
-  // if "uncached" is true, a vanilla constant pool index
-  jint field_or_method_at(int which, bool uncached) {
-    int i = which;
-    if (!uncached && cache() != NULL) {
-      // change byte-ordering and go via cache
-      i = map_instruction_operand_to_index(which);
-    }
-    assert(tag_at(i).is_field_or_method(), "Corrupted constant pool");
-    return *int_at_addr(i);
-  }
+  int remap_instruction_operand_from_cache(int operand);
 
   // Used while constructing constant pool (only by ClassFileParser)
   jint klass_index_at(int which) {
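The new object_at_put/object_at pair relies on publication ordering: the
payload slot is written first and the tag last (release_tag_at_put), so a
reader that observes the JVM_CONSTANT_Object tag also observes the oop. A
hedged sketch of that ordering using C++11 atomics; the CMS card re-dirtying
in the real code is a collector detail deliberately not modeled, and
Slot/Tag are illustrative:

    #include <atomic>
    #include <cassert>

    enum Tag { TAG_INVALID = 0, TAG_OBJECT = 1 };  // illustrative tag values

    struct Slot {
      void*            value;
      std::atomic<int> tag;
    };

    void object_at_put(Slot& s, void* obj) {
      s.value = obj;                                       // store the payload first
      s.tag.store(TAG_OBJECT, std::memory_order_release);  // then publish the tag
    }

    void* object_at(const Slot& s) {
      // A reader that sees the tag is guaranteed to see the payload.
      assert(s.tag.load(std::memory_order_acquire) == TAG_OBJECT);
      return s.value;
    }

    int main() {
      static int dummy;
      Slot s;
      s.value = 0;
      s.tag.store(TAG_INVALID);
      object_at_put(s, &dummy);
      return object_at(s) == &dummy ? 0 : 1;
    }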
--- a/src/share/vm/oops/cpCacheKlass.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/cpCacheKlass.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -261,6 +261,15 @@
 
 #endif
 
+void constantPoolCacheKlass::oop_print_value_on(oop obj, outputStream* st) {
+  assert(obj->is_constantPoolCache(), "obj must be constant pool cache");
+  constantPoolCacheOop cache = (constantPoolCacheOop)obj;
+  st->print("cache [%d]", cache->length());
+  cache->print_address_on(st);
+  st->print(" for ");
+  cache->constant_pool()->print_value_on(st);
+}
+
 void constantPoolCacheKlass::oop_verify_on(oop obj, outputStream* st) {
   guarantee(obj->is_constantPoolCache(), "obj must be constant pool cache");
   constantPoolCacheOop cache = (constantPoolCacheOop)obj;
--- a/src/share/vm/oops/cpCacheKlass.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/cpCacheKlass.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -61,9 +61,10 @@
   juint alloc_size() const              { return _alloc_size; }
   void set_alloc_size(juint n)          { _alloc_size = n; }
 
-#ifndef PRODUCT
  public:
   // Printing
+  void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
   void oop_print_on(oop obj, outputStream* st);
 #endif
 
--- a/src/share/vm/oops/cpCacheOop.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/cpCacheOop.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -28,21 +28,17 @@
 
 // Implementation of ConstantPoolCacheEntry
 
-void ConstantPoolCacheEntry::set_initial_state(int index) {
-  if (constantPoolCacheOopDesc::is_secondary_index(index)) {
-    // Hack:  The rewriter is trying to say that this entry itself
-    // will be a secondary entry.
-    int main_index = constantPoolCacheOopDesc::decode_secondary_index(index);
-    assert(0 <= main_index && main_index < 0x10000, "sanity check");
-    _indices = (main_index << 16);
-    assert(main_entry_index() == main_index, "");
-    return;
-  }
+void ConstantPoolCacheEntry::initialize_entry(int index) {
   assert(0 < index && index < 0x10000, "sanity check");
   _indices = index;
   assert(constant_pool_index() == index, "");
 }
 
+void ConstantPoolCacheEntry::initialize_secondary_entry(int main_index) {
+  assert(0 <= main_index && main_index < 0x10000, "sanity check");
+  _indices = (main_index << 16);
+  assert(main_entry_index() == main_index, "");
+}
 
 int ConstantPoolCacheEntry::as_flags(TosState state, bool is_final,
                     bool is_vfinal, bool is_volatile,
@@ -223,10 +219,10 @@
 
 
 void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site, int extra_data) {
-  methodOop method = (methodOop) sun_dyn_CallSiteImpl::vmmethod(call_site());
+  methodOop method = (methodOop) java_dyn_CallSite::vmmethod(call_site());
   assert(method->is_method(), "must be initialized properly");
   int param_size = method->size_of_parameters();
-  assert(param_size > 1, "method argument size must include MH.this & initial dynamic receiver");
+  assert(param_size >= 1, "method argument size must include MH.this");
   param_size -= 1;              // do not count MH.this; it is not stacked for invokedynamic
   if (Atomic::cmpxchg_ptr(call_site(), &_f1, NULL) == NULL) {
     // racing threads might be trying to install their own favorites
@@ -439,7 +435,18 @@
 
 void constantPoolCacheOopDesc::initialize(intArray& inverse_index_map) {
   assert(inverse_index_map.length() == length(), "inverse index map must have same length as cache");
-  for (int i = 0; i < length(); i++) entry_at(i)->set_initial_state(inverse_index_map[i]);
+  for (int i = 0; i < length(); i++) {
+    ConstantPoolCacheEntry* e = entry_at(i);
+    int original_index = inverse_index_map[i];
+    if ((original_index & Rewriter::_secondary_entry_tag) != 0) {
+      int main_index = (original_index - Rewriter::_secondary_entry_tag);
+      assert(!entry_at(main_index)->is_secondary_entry(), "valid main index");
+      e->initialize_secondary_entry(main_index);
+    } else {
+      e->initialize_entry(original_index);
+    }
+    assert(entry_at(i) == e, "sanity");
+  }
 }
 
 // RedefineClasses() API support:
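The rewritten initialize loop reads cleanly in isolation: each cache entry is
seeded from the rewriter's inverse index map, and an original index carrying
the secondary-entry tag turns the entry into a secondary one pointing at its
main entry. A sketch with stand-ins (SECONDARY_TAG and Entry are
illustrative; the real tag is Rewriter::_secondary_entry_tag, whose value is
not assumed here):

    #include <cassert>
    #include <vector>

    static const int SECONDARY_TAG = 1 << 16;   // illustrative tag bit

    struct Entry {
      int  cp_index;    // constant pool index (primary entries)
      int  main_index;  // main entry index (secondary entries)
      bool secondary;
      void initialize_entry(int original)       { cp_index = original; secondary = false; }
      void initialize_secondary_entry(int main) { main_index = main;   secondary = true;  }
    };

    void initialize(std::vector<Entry>& cache, const std::vector<int>& inverse_map) {
      assert(inverse_map.size() == cache.size());
      for (size_t i = 0; i < cache.size(); i++) {
        int original = inverse_map[i];
        if ((original & SECONDARY_TAG) != 0) {
          int main_index = original - SECONDARY_TAG;
          assert(!cache[main_index].secondary);   // a main entry must be primary
          cache[i].initialize_secondary_entry(main_index);
        } else {
          cache[i].initialize_entry(original);
        }
      }
    }

    int main() {
      std::vector<Entry> cache(3);
      std::vector<int> map;
      map.push_back(5);                     // entry 0 -> cp index 5
      map.push_back(9);                     // entry 1 -> cp index 9
      map.push_back(1 | SECONDARY_TAG);     // entry 2 -> secondary, main entry 1
      initialize(cache, map);
      return (cache[2].secondary && cache[2].main_index == 1) ? 0 : 1;
    }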
--- a/src/share/vm/oops/cpCacheOop.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/cpCacheOop.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -154,7 +154,8 @@
   };
 
   // Initialization
-  void set_initial_state(int index);             // sets entry to initial state
+  void initialize_entry(int original_index);     // initialize primary entry
+  void initialize_secondary_entry(int main_index); // initialize secondary entry
 
   void set_field(                                // sets entry to resolved field state
     Bytecodes::Code get_code,                    // the bytecode used for reading the field
@@ -251,6 +252,7 @@
 
   // Code generation support
   static WordSize size()                         { return in_WordSize(sizeof(ConstantPoolCacheEntry) / HeapWordSize); }
+  static ByteSize size_in_bytes()                { return in_ByteSize(sizeof(ConstantPoolCacheEntry)); }
   static ByteSize indices_offset()               { return byte_offset_of(ConstantPoolCacheEntry, _indices); }
   static ByteSize f1_offset()                    { return byte_offset_of(ConstantPoolCacheEntry, _f1); }
   static ByteSize f2_offset()                    { return byte_offset_of(ConstantPoolCacheEntry, _f2); }
@@ -321,6 +323,7 @@
   ConstantPoolCacheEntry* base() const           { return (ConstantPoolCacheEntry*)((address)this + in_bytes(base_offset())); }
 
   friend class constantPoolCacheKlass;
+  friend class ConstantPoolCacheEntry;
 
  public:
   // Initialization
@@ -329,7 +332,8 @@
   // Secondary indexes.
   // They must look completely different from normal indexes.
   // The main reason is that byte swapping is sometimes done on normal indexes.
-  // Also, it is helpful for debugging to tell the two apart.
+  // Also, some of the CP accessors do different things for secondary indexes.
+  // Finally, it is helpful for debugging to tell the two apart.
   static bool is_secondary_index(int i) { return (i < 0); }
   static int  decode_secondary_index(int i) { assert(is_secondary_index(i),  ""); return ~i; }
   static int  encode_secondary_index(int i) { assert(!is_secondary_index(i), ""); return ~i; }
@@ -337,18 +341,35 @@
   // Accessors
   void set_constant_pool(constantPoolOop pool)   { oop_store_without_check((oop*)&_constant_pool, (oop)pool); }
   constantPoolOop constant_pool() const          { return _constant_pool; }
-  ConstantPoolCacheEntry* entry_at(int i) const  { assert(0 <= i && i < length(), "index out of bounds"); return base() + i; }
+  // Fetches the entry at the given index.
+  // The entry may be either primary or secondary.
+  // In either case the index must not be encoded or byte-swapped in any way.
+  ConstantPoolCacheEntry* entry_at(int i) const {
+    assert(0 <= i && i < length(), "index out of bounds");
+    return base() + i;
+  }
+  // Fetches the secondary entry referred to by index.
+  // The index may be a secondary index, and must not be byte-swapped.
+  ConstantPoolCacheEntry* secondary_entry_at(int i) const {
+    int raw_index = i;
+    if (is_secondary_index(i)) {  // correct these on the fly
+      raw_index = decode_secondary_index(i);
+    }
+    assert(entry_at(raw_index)->is_secondary_entry(), "not a secondary entry");
+    return entry_at(raw_index);
+  }
+  // Given a primary or secondary index, fetch the corresponding primary entry.
+  // Indirect through the secondary entry, if the index is encoded as a secondary index.
+  // The index must not be byte-swapped.
   ConstantPoolCacheEntry* main_entry_at(int i) const {
-    ConstantPoolCacheEntry* e;
+    int primary_index = i;
     if (is_secondary_index(i)) {
       // run through an extra level of indirection:
-      i = decode_secondary_index(i);
-      e = entry_at(i);
-      i = e->main_entry_index();
+      int raw_index = decode_secondary_index(i);
+      primary_index = entry_at(raw_index)->main_entry_index();
     }
-    e = entry_at(i);
-    assert(!e->is_secondary_entry(), "only one level of indirection");
-    return e;
+    assert(!entry_at(primary_index)->is_secondary_entry(), "only one level of indirection");
+    return entry_at(primary_index);
   }
 
   // GC support
@@ -359,6 +380,12 @@
 
   // Code generation
   static ByteSize base_offset()                  { return in_ByteSize(sizeof(constantPoolCacheOopDesc)); }
+  static ByteSize entry_offset(int raw_index) {
+    int index = raw_index;
+    if (is_secondary_index(raw_index))
+      index = decode_secondary_index(raw_index);
+    return (base_offset() + ConstantPoolCacheEntry::size_in_bytes() * index);
+  }
 
   // RedefineClasses() API support:
   // If any entry of this constantPoolCache points to any of
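Secondary indexes are just the bitwise complement of a non-negative index:
every encoded value is negative, so is_secondary_index reduces to a sign
test, a byte-swapped u2 (which stays in 0..65535) can never collide with
one, and they stand out in a debugger. A tiny self-checking sketch of the
round trip:

    #include <cassert>

    static bool is_secondary_index(int i)     { return i < 0; }
    static int  decode_secondary_index(int i) { assert(is_secondary_index(i));  return ~i; }
    static int  encode_secondary_index(int i) { assert(!is_secondary_index(i)); return ~i; }

    int main() {
      for (int i = 0; i < 1000; i++) {
        int enc = encode_secondary_index(i);
        assert(is_secondary_index(enc));          // always negative
        assert(decode_secondary_index(enc) == i); // ~~i == i
      }
      return 0;
    }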
--- a/src/share/vm/oops/generateOopMap.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/generateOopMap.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1559,7 +1559,7 @@
     case Bytecodes::_invokevirtual:
     case Bytecodes::_invokespecial:     do_method(false, false, itr->get_index_big(), itr->bci()); break;
     case Bytecodes::_invokestatic:      do_method(true,  false, itr->get_index_big(), itr->bci()); break;
-    case Bytecodes::_invokedynamic:     do_method(false, true,  itr->get_index_int(), itr->bci()); break;
+    case Bytecodes::_invokedynamic:     do_method(true,  false, itr->get_index_int(), itr->bci()); break;
     case Bytecodes::_invokeinterface:   do_method(false, true,  itr->get_index_big(), itr->bci()); break;
     case Bytecodes::_newarray:
     case Bytecodes::_anewarray:         pp_new_ref(vCTS, itr->bci()); break;
@@ -1830,12 +1830,8 @@
 
 
 void GenerateOopMap::do_ldc(int idx, int bci) {
-  constantPoolOop cp = method()->constants();
-  constantTag tag    = cp->tag_at(idx);
-
-  CellTypeState cts = (tag.is_string() || tag.is_unresolved_string() ||
-                       tag.is_klass()  || tag.is_unresolved_klass())
-                    ? CellTypeState::make_line_ref(bci) : valCTS;
+  constantPoolOop cp  = method()->constants();
+  CellTypeState   cts = cp->is_pointer_entry(idx) ? CellTypeState::make_line_ref(bci) : valCTS;
   ppush1(cts);
 }
 
@@ -1900,11 +1896,9 @@
 }
 
 void GenerateOopMap::do_method(int is_static, int is_interface, int idx, int bci) {
-  // Dig up signature for field in constant pool
-  constantPoolOop cp    = _method->constants();
-  int nameAndTypeIdx    = cp->name_and_type_ref_index_at(idx);
-  int signatureIdx      = cp->signature_ref_index_at(nameAndTypeIdx);  // @@@@@
-  symbolOop signature   = cp->symbol_at(signatureIdx);
+  // Dig up the signature for the method in the constant pool
+  constantPoolOop cp  = _method->constants();
+  symbolOop signature = cp->signature_ref_at(idx);
 
   // Parse method signature
   CellTypeState out[4];
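do_ldc above now delegates tag enumeration to the constant pool's
is_pointer_entry and simply pushes a reference or a value cell. A hedged
sketch of that split; the tag and cell names are illustrative, and the real
predicate (see the constantPoolOop.hpp hunk earlier) also covers symbol
entries, omitted here for brevity:

    #include <cassert>

    enum Tag { TAG_INT, TAG_FLOAT, TAG_STRING, TAG_UNRESOLVED_STRING,
               TAG_KLASS, TAG_UNRESOLVED_KLASS, TAG_OBJECT };

    // Does the slot hold (or resolve to) a reference?
    static bool is_pointer_entry(Tag t) {
      return t == TAG_STRING || t == TAG_UNRESOLVED_STRING ||
             t == TAG_KLASS  || t == TAG_UNRESOLVED_KLASS  ||
             t == TAG_OBJECT;
    }

    enum CellType { CELL_VALUE, CELL_REF };

    static CellType cell_for_ldc(Tag t) {
      return is_pointer_entry(t) ? CELL_REF : CELL_VALUE;
    }

    int main() {
      assert(cell_for_ldc(TAG_STRING) == CELL_REF);
      assert(cell_for_ldc(TAG_INT)    == CELL_VALUE);
      assert(cell_for_ldc(TAG_OBJECT) == CELL_REF);   // new JVM_CONSTANT_Object case
      return 0;
    }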
--- a/src/share/vm/oops/instanceKlass.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/instanceKlass.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -383,7 +383,7 @@
       this_oop->set_initialization_state_and_notify(initialization_error, THREAD);
       CLEAR_PENDING_EXCEPTION;   // ignore any exception thrown, class initialization error is thrown below
     }
-    if (e->is_a(SystemDictionary::error_klass())) {
+    if (e->is_a(SystemDictionary::Error_klass())) {
       THROW_OOP(e());
     } else {
       JavaCallArguments args(e);
@@ -568,7 +568,7 @@
     THROW_MSG(throwError ? vmSymbols::java_lang_InstantiationError()
               : vmSymbols::java_lang_InstantiationException(), external_name());
   }
-  if (as_klassOop() == SystemDictionary::class_klass()) {
+  if (as_klassOop() == SystemDictionary::Class_klass()) {
     ResourceMark rm(THREAD);
     THROW_MSG(throwError ? vmSymbols::java_lang_IllegalAccessError()
               : vmSymbols::java_lang_IllegalAccessException(), external_name());
@@ -2045,8 +2045,9 @@
     // As we walk along, look for equalities between outer1 and class2.
     // Eventually, the walks will terminate as outer1 stops
     // at the top-level class around the original class.
-    symbolOop ignore_name;
-    klassOop next = outer1->compute_enclosing_class(ignore_name, CHECK_false);
+    bool ignore_inner_is_member;
+    klassOop next = outer1->compute_enclosing_class(&ignore_inner_is_member,
+                                                    CHECK_false);
     if (next == NULL)  break;
     if (next == class2())  return true;
     outer1 = instanceKlassHandle(THREAD, next);
@@ -2055,8 +2056,9 @@
   // Now do the same for class2.
   instanceKlassHandle outer2 = class2;
   for (;;) {
-    symbolOop ignore_name;
-    klassOop next = outer2->compute_enclosing_class(ignore_name, CHECK_false);
+    bool ignore_inner_is_member;
+    klassOop next = outer2->compute_enclosing_class(&ignore_inner_is_member,
+                                                    CHECK_false);
     if (next == NULL)  break;
     // Might as well check the new outer against all available values.
     if (next == class1())  return true;
@@ -2223,7 +2225,7 @@
 void instanceKlass::oop_print_on(oop obj, outputStream* st) {
   Klass::oop_print_on(obj, st);
 
-  if (as_klassOop() == SystemDictionary::string_klass()) {
+  if (as_klassOop() == SystemDictionary::String_klass()) {
     typeArrayOop value  = java_lang_String::value(obj);
     juint        offset = java_lang_String::offset(obj);
     juint        length = java_lang_String::length(obj);
@@ -2243,7 +2245,7 @@
   FieldPrinter print_nonstatic_field(st, obj);
   do_nonstatic_fields(&print_nonstatic_field);
 
-  if (as_klassOop() == SystemDictionary::class_klass()) {
+  if (as_klassOop() == SystemDictionary::Class_klass()) {
     st->print(BULLET"signature: ");
     java_lang_Class::print_signature(obj, st);
     st->cr();
@@ -2266,11 +2268,13 @@
   }
 }
 
+#endif //PRODUCT
+
 void instanceKlass::oop_print_value_on(oop obj, outputStream* st) {
   st->print("a ");
   name()->print_value_on(st);
   obj->print_address_on(st);
-  if (as_klassOop() == SystemDictionary::string_klass()
+  if (as_klassOop() == SystemDictionary::String_klass()
       && java_lang_String::value(obj) != NULL) {
     ResourceMark rm;
     int len = java_lang_String::length(obj);
@@ -2279,7 +2283,7 @@
     st->print(" = \"%s\"", str);
     if (len > plen)
       st->print("...[%d]", len);
-  } else if (as_klassOop() == SystemDictionary::class_klass()) {
+  } else if (as_klassOop() == SystemDictionary::Class_klass()) {
     klassOop k = java_lang_Class::as_klassOop(obj);
     st->print(" = ");
     if (k != NULL) {
@@ -2297,8 +2301,6 @@
   }
 }
 
-#endif // ndef PRODUCT
-
 const char* instanceKlass::internal_name() const {
   return external_name();
 }
@@ -2346,7 +2348,7 @@
 
     // Check that we have the right class
     static bool first_time = true;
-    guarantee(k == SystemDictionary::class_klass() && first_time, "Invalid verify of maps");
+    guarantee(k == SystemDictionary::Class_klass() && first_time, "Invalid verify of maps");
     first_time = false;
     const int extra = java_lang_Class::number_of_fake_oop_fields;
     guarantee(ik->nonstatic_field_size() == extra, "just checking");
--- a/src/share/vm/oops/instanceKlass.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/instanceKlass.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -337,12 +337,12 @@
   static bool is_same_class_package(oop class_loader1, symbolOop class_name1, oop class_loader2, symbolOop class_name2);
 
   // find an enclosing class (defined where original code was, in jvm.cpp!)
-  klassOop compute_enclosing_class(symbolOop& simple_name_result, TRAPS) {
+  klassOop compute_enclosing_class(bool* inner_is_member, TRAPS) {
     instanceKlassHandle self(THREAD, this->as_klassOop());
-    return compute_enclosing_class_impl(self, simple_name_result, THREAD);
+    return compute_enclosing_class_impl(self, inner_is_member, THREAD);
   }
   static klassOop compute_enclosing_class_impl(instanceKlassHandle self,
-                                               symbolOop& simple_name_result, TRAPS);
+                                               bool* inner_is_member, TRAPS);
 
   // tell if two classes have the same enclosing class (at package level)
   bool is_same_package_member(klassOop class2, TRAPS) {
@@ -839,17 +839,16 @@
   // JVMTI support
   jint jvmti_class_status() const;
 
-#ifndef PRODUCT
  public:
   // Printing
+  void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
   void oop_print_on      (oop obj, outputStream* st);
-  void oop_print_value_on(oop obj, outputStream* st);
 
   void print_dependent_nmethods(bool verbose = false);
   bool is_dependent_nmethod(nmethod* nm);
 #endif
 
- public:
   // Verification
   const char* internal_name() const;
   void oop_verify_on(oop obj, outputStream* st);
--- a/src/share/vm/oops/instanceKlassKlass.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/instanceKlassKlass.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -317,6 +317,11 @@
     pm->claim_or_forward_breadth(sg_addr);
   }
 
+  oop* bsm_addr = ik->adr_bootstrap_method();
+  if (PSScavenge::should_scavenge(bsm_addr)) {
+    pm->claim_or_forward_breadth(bsm_addr);
+  }
+
   klassKlass::oop_copy_contents(pm, obj);
 }
 
@@ -345,6 +350,11 @@
     pm->claim_or_forward_depth(sg_addr);
   }
 
+  oop* bsm_addr = ik->adr_bootstrap_method();
+  if (PSScavenge::should_scavenge(bsm_addr)) {
+    pm->claim_or_forward_depth(bsm_addr);
+  }
+
   klassKlass::oop_copy_contents(pm, obj);
 }
 
@@ -628,6 +638,7 @@
   st->cr();
 }
 
+#endif //PRODUCT
 
 void instanceKlassKlass::oop_print_value_on(oop obj, outputStream* st) {
   assert(obj->is_klass(), "must be klass");
@@ -635,8 +646,6 @@
   ik->name()->print_value_on(st);
 }
 
-#endif // PRODUCT
-
 const char* instanceKlassKlass::internal_name() const {
   return "{instance class}";
 }
--- a/src/share/vm/oops/instanceKlassKlass.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/instanceKlassKlass.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -69,14 +69,13 @@
   // Apply closure to the InstanceKlass oops that are outside the java heap.
   inline void iterate_c_heap_oops(instanceKlass* ik, OopClosure* closure);
 
-#ifndef PRODUCT
  public:
   // Printing
+  void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
   void oop_print_on(oop obj, outputStream* st);
-  void oop_print_value_on(oop obj, outputStream* st);
 #endif
 
- public:
   // Verification
   const char* internal_name() const;
   void oop_verify_on(oop obj, outputStream* st);
--- a/src/share/vm/oops/instanceRefKlass.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/instanceRefKlass.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -78,9 +78,9 @@
 
 #ifndef SERIALGC
 template <class T>
-static void specialized_oop_follow_contents(instanceRefKlass* ref,
-                                            ParCompactionManager* cm,
-                                            oop obj) {
+void specialized_oop_follow_contents(instanceRefKlass* ref,
+                                     ParCompactionManager* cm,
+                                     oop obj) {
   T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
   T heap_oop = oopDesc::load_heap_oop(referent_addr);
   debug_only(
@@ -397,7 +397,7 @@
 
   // Check that we have the right class
   debug_only(static bool first_time = true);
-  assert(k == SystemDictionary::reference_klass() && first_time,
+  assert(k == SystemDictionary::Reference_klass() && first_time,
          "Invalid update of maps");
   debug_only(first_time = false);
   assert(ik->nonstatic_oop_map_count() == 1, "just checking");
--- a/src/share/vm/oops/klass.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/klass.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -217,8 +217,8 @@
     set_super(NULL);
     oop_store_without_check((oop*) &_primary_supers[0], (oop) this->as_klassOop());
     assert(super_depth() == 0, "Object must already be initialized properly");
-  } else if (k != super() || k == SystemDictionary::object_klass()) {
-    assert(super() == NULL || super() == SystemDictionary::object_klass(),
+  } else if (k != super() || k == SystemDictionary::Object_klass()) {
+    assert(super() == NULL || super() == SystemDictionary::Object_klass(),
            "initialize this only once to a non-trivial value");
     set_super(k);
     Klass* sup = k->klass_part();
@@ -370,7 +370,7 @@
 void Klass::remove_from_sibling_list() {
   // remove receiver from sibling list
   instanceKlass* super = superklass();
-  assert(super != NULL || as_klassOop() == SystemDictionary::object_klass(), "should have super");
+  assert(super != NULL || as_klassOop() == SystemDictionary::Object_klass(), "should have super");
   if (super == NULL) return;        // special case: class Object
   if (super->subklass() == this) {
     // first subklass
@@ -541,6 +541,7 @@
   st->cr();
 }
 
+#endif //PRODUCT
 
 void Klass::oop_print_value_on(oop obj, outputStream* st) {
   // print title
@@ -549,8 +550,6 @@
   obj->print_address_on(st);
 }
 
-#endif
-
 // Verification
 
 void Klass::oop_verify_on(oop obj, outputStream* st) {
--- a/src/share/vm/oops/klass.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/klass.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -776,14 +776,13 @@
   // JVMTI support
   virtual jint jvmti_class_status() const;
 
-#ifndef PRODUCT
  public:
   // Printing
+  virtual void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
   virtual void oop_print_on      (oop obj, outputStream* st);
-  virtual void oop_print_value_on(oop obj, outputStream* st);
-#endif
+#endif //PRODUCT
 
- public:
   // Verification
   virtual const char* internal_name() const = 0;
   virtual void oop_verify_on(oop obj, outputStream* st);
--- a/src/share/vm/oops/klassKlass.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/klassKlass.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -202,13 +202,12 @@
   Klass::oop_print_on(obj, st);
 }
 
+#endif //PRODUCT
 
 void klassKlass::oop_print_value_on(oop obj, outputStream* st) {
   Klass::oop_print_value_on(obj, st);
 }
 
-#endif
-
 const char* klassKlass::internal_name() const {
   return "{other class}";
 }
--- a/src/share/vm/oops/klassKlass.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/klassKlass.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -67,14 +67,13 @@
   juint alloc_size() const              { return _alloc_size; }
   void set_alloc_size(juint n)          { _alloc_size = n; }
 
-#ifndef PRODUCT
  public:
   // Printing
+  void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
   void oop_print_on      (oop obj, outputStream* st);
-  void oop_print_value_on(oop obj, outputStream* st);
-#endif
+#endif //PRODUCT
 
- public:
   // Verification
   const char* internal_name() const;
   void oop_verify_on(oop obj, outputStream* st);
--- a/src/share/vm/oops/methodDataKlass.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/methodDataKlass.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -214,6 +214,8 @@
   m->print_data_on(st);
 }
 
+#endif //PRODUCT
+
 void methodDataKlass::oop_print_value_on(oop obj, outputStream* st) {
   assert(obj->is_methodData(), "should be method data");
   methodDataOop m = methodDataOop(obj);
@@ -221,8 +223,6 @@
   m->method()->print_value_on(st);
 }
 
-#endif // !PRODUCT
-
 const char* methodDataKlass::internal_name() const {
   return "{method data}";
 }
--- a/src/share/vm/oops/methodDataKlass.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/methodDataKlass.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -71,14 +71,13 @@
   int oop_oop_iterate(oop obj, OopClosure* blk);
   int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
 
-#ifndef PRODUCT
  public:
   // Printing
+  void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
   void oop_print_on      (oop obj, outputStream* st);
-  void oop_print_value_on(oop obj, outputStream* st);
-#endif // !PRODUCT
+#endif //PRODUCT
 
- public:
   // Verify operations
   const char* internal_name() const;
   void oop_verify_on(oop obj, outputStream* st);
--- a/src/share/vm/oops/methodDataOop.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/methodDataOop.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -545,6 +545,10 @@
     return cell_offset(counter_cell_count);
   }
 
+  void set_count(uint count) {
+    set_uint_at(count_off, count);
+  }
+
 #ifndef PRODUCT
   void print_data_on(outputStream* st);
 #endif
@@ -692,6 +696,23 @@
 
   void clear_row(uint row) {
     assert(row < row_limit(), "oob");
+    // Clear the total count, which is the indicator of a polymorphic
+    // call site.  The site may then look monomorphic, but that gives
+    // more accurate profiling information, because there has been an
+    // execution phase change since the klasses were unloaded.  If the
+    // site is still polymorphic, the MDO will be updated to reflect it.
+    // But the site could also have become merely bimorphic, in which
+    // case keeping a non-zero total count would be wrong.  Even if we
+    // wrongly treat the site as monomorphic during compilation, the
+    // worst case is a trap, a deoptimization, and a recompilation with
+    // an updated MDO after the method has run again in the interpreter.
+    // An additional receiver will be recorded in the cleaned row
+    // during the next call execution.
+    //
+    // Note: our profiling logic tolerates empty rows in any slot.
+    // We sort the profiling info (ciCallProfile) for compilation.
+    //
+    set_count(0);
     set_receiver(row, NULL);
     set_receiver_count(row, 0);
   }
@@ -1391,6 +1412,9 @@
   }
   void inc_decompile_count() {
     _nof_decompiles += 1;
+    if (decompile_count() > (uint)PerMethodRecompilationCutoff) {
+      method()->set_not_compilable();
+    }
   }
 
   // Support for code generation
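With this change inc_decompile_count owns the PerMethodRecompilationCutoff
policy (the matching re-check disappears from methodOop.cpp further down):
the method is marked not-compilable once, at the moment the counter crosses
the cutoff, instead of being recomputed on every is_not_compilable query. A
minimal sketch with an illustrative cutoff value:

    #include <cassert>

    struct Method {
      unsigned decompiles;
      bool     not_compilable;

      void set_not_compilable() { not_compilable = true; }

      void inc_decompile_count(unsigned cutoff) {
        decompiles += 1;
        if (decompiles > cutoff) {
          set_not_compilable();   // one-time transition; queries stay cheap
        }
      }
    };

    int main() {
      Method m = { 0, false };
      const unsigned cutoff = 4;  // stand-in for PerMethodRecompilationCutoff
      for (int i = 0; i < 5; i++) m.inc_decompile_count(cutoff);
      assert(m.not_compilable);
      return 0;
    }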
--- a/src/share/vm/oops/methodKlass.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/methodKlass.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -308,6 +308,7 @@
   }
 }
 
+#endif //PRODUCT
 
 void methodKlass::oop_print_value_on(oop obj, outputStream* st) {
   assert(obj->is_method(), "must be method");
@@ -323,8 +324,6 @@
   if (WizardMode && m->code() != NULL) st->print(" ((nmethod*)%p)", m->code());
 }
 
-#endif // PRODUCT
-
 const char* methodKlass::internal_name() const {
   return "{method}";
 }
--- a/src/share/vm/oops/methodKlass.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/methodKlass.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -68,14 +68,13 @@
   int oop_oop_iterate(oop obj, OopClosure* blk);
   int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
 
-#ifndef PRODUCT
  public:
   // Printing
+  void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
   void oop_print_on      (oop obj, outputStream* st);
-  void oop_print_value_on(oop obj, outputStream* st);
-#endif
+#endif //PRODUCT
 
- public:
   // Verify operations
   const char* internal_name() const;
   void oop_verify_on(oop obj, outputStream* st);
--- a/src/share/vm/oops/methodOop.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/methodOop.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -456,12 +456,12 @@
     return objArrayHandle(THREAD, Universe::the_empty_class_klass_array());
   } else {
     methodHandle h_this(THREAD, this_oop);
-    objArrayOop m_oop = oopFactory::new_objArray(SystemDictionary::class_klass(), length, CHECK_(objArrayHandle()));
+    objArrayOop m_oop = oopFactory::new_objArray(SystemDictionary::Class_klass(), length, CHECK_(objArrayHandle()));
     objArrayHandle mirrors (THREAD, m_oop);
     for (int i = 0; i < length; i++) {
       CheckedExceptionElement* table = h_this->checked_exceptions_start(); // recompute on each iteration, not gc safe
       klassOop k = h_this->constants()->klass_at(table[i].class_cp_index, CHECK_(objArrayHandle()));
-      assert(Klass::cast(k)->is_subclass_of(SystemDictionary::throwable_klass()), "invalid exception class");
+      assert(Klass::cast(k)->is_subclass_of(SystemDictionary::Throwable_klass()), "invalid exception class");
       mirrors->obj_at_put(i, Klass::cast(k)->java_mirror());
     }
     return mirrors;
@@ -575,12 +575,6 @@
     return true;
   }
 
-  methodDataOop mdo = method_data();
-  if (mdo != NULL
-      && (uint)mdo->decompile_count() > (uint)PerMethodRecompilationCutoff) {
-    // Since (uint)-1 is large, -1 really means 'no cutoff'.
-    return true;
-  }
 #ifdef COMPILER2
   if (is_tier1_compile(comp_level)) {
     if (is_not_tier1_compilable()) {
@@ -593,7 +587,16 @@
 }
 
 // call this when compiler finds that this method is not compilable
-void methodOopDesc::set_not_compilable(int comp_level) {
+void methodOopDesc::set_not_compilable(int comp_level, bool report) {
+  if (PrintCompilation && report) {
+    ttyLocker ttyl;
+    tty->print("made not compilable ");
+    this->print_short_name(tty);
+    int size = this->code_size();
+    if (size > 0)
+      tty->print(" (%d bytes)", size);
+    tty->cr();
+  }
   if ((TraceDeoptimization || LogCompilation) && (xtty != NULL)) {
     ttyLocker ttyl;
     xtty->begin_elem("make_not_compilable thread='%d'", (int) os::current_thread_id());
@@ -688,7 +691,7 @@
   // so making them eagerly shouldn't be too expensive.
   AdapterHandlerEntry* adapter = AdapterHandlerLibrary::get_adapter(mh);
   if (adapter == NULL ) {
-    THROW_0(vmSymbols::java_lang_OutOfMemoryError());
+    THROW_MSG_NULL(vmSymbols::java_lang_VirtualMachineError(), "out of space in CodeCache for adapters");
   }
 
   mh->set_adapter_entry(adapter);
@@ -705,6 +708,16 @@
 // This function must not hit a safepoint!
 address methodOopDesc::verified_code_entry() {
   debug_only(No_Safepoint_Verifier nsv;)
+  nmethod *code = (nmethod *)OrderAccess::load_ptr_acquire(&_code);
+  if (code == NULL && UseCodeCacheFlushing) {
+    nmethod *saved_code = CodeCache::find_and_remove_saved_code(this);
+    if (saved_code != NULL) {
+      methodHandle method(this);
+      assert(!saved_code->is_osr_method(), "should not get here for osr");
+      set_code( method, saved_code );
+    }
+  }
+
   assert(_from_compiled_entry != NULL, "must be set");
   return _from_compiled_entry;
 }
@@ -733,8 +746,8 @@
   int comp_level = code->comp_level();
   // In theory there could be a race here. In practice it is unlikely
   // and not worth worrying about.
-  if (comp_level > highest_tier_compile()) {
-    set_highest_tier_compile(comp_level);
+  if (comp_level > mh->highest_tier_compile()) {
+    mh->set_highest_tier_compile(comp_level);
   }
 
   OrderAccess::storestore();
@@ -821,6 +834,18 @@
   return pchase;
 }
 
+//------------------------------------------------------------------------------
+// methodOopDesc::is_method_handle_adapter
+//
+// Tests if this method is an internal adapter frame from the
+// MethodHandleCompiler.
+bool methodOopDesc::is_method_handle_adapter() const {
+  return ((name() == vmSymbols::invoke_name() &&
+           method_holder() == SystemDictionary::MethodHandle_klass())
+          ||
+          method_holder() == SystemDictionary::InvokeDynamic_klass());
+}
+
 methodHandle methodOopDesc::make_invoke_method(KlassHandle holder,
                                                symbolHandle signature,
                                                Handle method_type, TRAPS) {
@@ -1032,8 +1057,8 @@
       // We are loading classes eagerly. If a ClassNotFoundException or
       // a LinkageError was generated, be sure to ignore it.
       if (HAS_PENDING_EXCEPTION) {
-        if (PENDING_EXCEPTION->is_a(SystemDictionary::classNotFoundException_klass()) ||
-            PENDING_EXCEPTION->is_a(SystemDictionary::linkageError_klass())) {
+        if (PENDING_EXCEPTION->is_a(SystemDictionary::ClassNotFoundException_klass()) ||
+            PENDING_EXCEPTION->is_a(SystemDictionary::LinkageError_klass())) {
           CLEAR_PENDING_EXCEPTION;
         } else {
           return false;
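set_not_compilable gains a report flag plus a quiet wrapper, so a caller
that has already produced output (or never wants any) can suppress the
-XX:+PrintCompilation line. A sketch of the flag plumbing, with a plain bool
and printf standing in for the real PrintCompilation flag and tty:

    #include <cstdio>

    struct Method {
      const char* name;
      bool not_compilable;
      bool print_compilation;   // stand-in for -XX:+PrintCompilation

      void set_not_compilable(bool report = true) {
        if (print_compilation && report) {
          std::printf("made not compilable %s\n", name);
        }
        not_compilable = true;
      }
      void set_not_compilable_quietly() { set_not_compilable(false); }
    };

    int main() {
      Method m = { "Foo::bar", false, true };
      m.set_not_compilable_quietly();   // silent
      m.set_not_compilable();           // logs once
      return m.not_compilable ? 0 : 1;
    }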
--- a/src/share/vm/oops/methodOop.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/methodOop.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -303,7 +303,7 @@
   bool check_code() const;      // Not inline to avoid circular ref
   nmethod* volatile code() const                 { assert( check_code(), "" ); return (nmethod *)OrderAccess::load_ptr_acquire(&_code); }
   void clear_code();            // Clear out any compiled code
-  void set_code(methodHandle mh, nmethod* code);
+  static void set_code(methodHandle mh, nmethod* code);
   void set_adapter_entry(AdapterHandlerEntry* adapter) {  _adapter = adapter; }
   address get_i2c_entry();
   address get_c2i_entry();
@@ -365,6 +365,7 @@
 #endif
 
   // byte codes
+  void    set_code(address code)      { return constMethod()->set_code(code); }
   address code_base() const           { return constMethod()->code_base(); }
   bool    contains(address bcp) const { return constMethod()->contains(bcp); }
 
@@ -524,6 +525,9 @@
 
   // JSR 292 support
   bool is_method_handle_invoke() const              { return access_flags().is_method_handle_invoke(); }
+  // Tests if this method is an internal adapter frame from the
+  // MethodHandleCompiler.
+  bool is_method_handle_adapter() const;
   static methodHandle make_invoke_method(KlassHandle holder,
                                          symbolHandle signature,
                                          Handle method_type,
@@ -537,6 +541,7 @@
   // all without checking for a stack overflow
   static int extra_stack_entries() { return (EnableMethodHandles ? (int)MethodHandlePushLimit : 0) + (EnableInvokeDynamic ? 3 : 0); }
   static int extra_stack_words();  // = extra_stack_entries() * Interpreter::stackElementSize()
+
   // RedefineClasses() support:
   bool is_old() const                               { return access_flags().is_old(); }
   void set_is_old()                                 { _access_flags.set_is_old(); }
@@ -591,7 +596,10 @@
   // whether it is not compilable for another reason like having a
   // breakpoint set in it.
   bool is_not_compilable(int comp_level = CompLevel_highest_tier) const;
-  void set_not_compilable(int comp_level = CompLevel_highest_tier);
+  void set_not_compilable(int comp_level = CompLevel_highest_tier, bool report = true);
+  void set_not_compilable_quietly(int comp_level = CompLevel_highest_tier) {
+    set_not_compilable(comp_level, false);
+  }
 
   bool is_not_osr_compilable() const             { return is_not_compilable() || access_flags().is_not_osr_compilable(); }
   void set_not_osr_compilable()                  { _access_flags.set_not_osr_compilable(); }
--- a/src/share/vm/oops/objArrayKlass.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/objArrayKlass.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -127,16 +127,14 @@
           // pointer delta is scaled to number of elements (length field in
           // objArrayOop) which we assume is 32 bit.
           assert(pd == (size_t)(int)pd, "length field overflow");
-          const size_t done_word_len = objArrayOopDesc::array_size((int)pd);
-          bs->write_ref_array(MemRegion((HeapWord*)dst, done_word_len));
+          bs->write_ref_array((HeapWord*)dst, pd);
           THROW(vmSymbols::java_lang_ArrayStoreException());
           return;
         }
       }
     }
   }
-  const size_t word_len = objArrayOopDesc::array_size(length);
-  bs->write_ref_array(MemRegion((HeapWord*)dst, word_len));
+  bs->write_ref_array((HeapWord*)dst, length);
 }
 
 void objArrayKlass::copy_array(arrayOop s, int src_pos, arrayOop d,
@@ -248,8 +246,8 @@
   } else {
     objArrayOop sec_oop = oopFactory::new_system_objArray(num_secondaries, CHECK_NULL);
     objArrayHandle secondaries(THREAD, sec_oop);
-    secondaries->obj_at_put(num_extra_slots+0, SystemDictionary::cloneable_klass());
-    secondaries->obj_at_put(num_extra_slots+1, SystemDictionary::serializable_klass());
+    secondaries->obj_at_put(num_extra_slots+0, SystemDictionary::Cloneable_klass());
+    secondaries->obj_at_put(num_extra_slots+1, SystemDictionary::Serializable_klass());
     for (int i = 0; i < num_elem_supers; i++) {
       klassOop elem_super = (klassOop) elem_supers->obj_at(i);
       klassOop array_super = elem_super->klass_part()->array_klass_or_null();
@@ -501,6 +499,8 @@
   }
 }
 
+#endif //PRODUCT
+
 static int max_objArray_print_length = 4;
 
 void objArrayKlass::oop_print_value_on(oop obj, outputStream* st) {
@@ -510,7 +510,7 @@
   int len = objArrayOop(obj)->length();
   st->print("[%d] ", len);
   obj->print_address_on(st);
-  if (PrintOopAddress || PrintMiscellaneous && (WizardMode || Verbose)) {
+  if (NOT_PRODUCT(PrintOopAddress ||) PrintMiscellaneous && (WizardMode || Verbose)) {
     st->print("{");
     for (int i = 0; i < len; i++) {
       if (i > max_objArray_print_length) {
@@ -522,8 +522,6 @@
   }
 }
 
-#endif // PRODUCT
-
 const char* objArrayKlass::internal_name() const {
   return external_name();
 }
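Both paths above now call write_ref_array with a start address and an
element count, letting the barrier set decide which cards the stores span
instead of each caller assembling a MemRegion in heap words. A toy
card-table sketch of the callee's job; card size, oop size, and table
layout are all illustrative:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    static const size_t card_shift = 9;   // illustrative 512-byte cards
    static const size_t elem_size  = 8;   // illustrative oop size in bytes
    static uint8_t cards[1 << 16];        // toy card table, 1 == dirty

    // Dirty every card spanned by 'count' elements starting at 'start'.
    static void write_ref_array(const void* start, size_t count) {
      if (count == 0) return;
      uintptr_t from = (uintptr_t)start;
      uintptr_t last = from + count * elem_size - 1;   // last byte written
      for (uintptr_t c = from >> card_shift; c <= (last >> card_shift); c++) {
        cards[c & 0xffff] = 1;
      }
    }

    int main() {
      static uint64_t heap[1024];
      write_ref_array(&heap[10], 300);   // e.g. a partially copied oop array
      uintptr_t first = ((uintptr_t)&heap[10]) >> card_shift;
      assert(cards[first & 0xffff] == 1);
      return 0;
    }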
--- a/src/share/vm/oops/objArrayKlass.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/objArrayKlass.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -119,14 +119,13 @@
  private:
    static klassOop array_klass_impl   (objArrayKlassHandle this_oop, bool or_null, int n, TRAPS);
 
-#ifndef PRODUCT
  public:
   // Printing
+  void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
   void oop_print_on      (oop obj, outputStream* st);
-  void oop_print_value_on(oop obj, outputStream* st);
-#endif
+#endif //PRODUCT
 
- public:
   // Verification
   const char* internal_name() const;
   void oop_verify_on(oop obj, outputStream* st);
--- a/src/share/vm/oops/objArrayKlassKlass.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/objArrayKlassKlass.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -99,7 +99,7 @@
       }
     } else {
       // The element type is already Object.  Object[] has direct super of Object.
-      super_klass = KlassHandle(THREAD, SystemDictionary::object_klass());
+      super_klass = KlassHandle(THREAD, SystemDictionary::Object_klass());
     }
   }
 
@@ -278,6 +278,7 @@
   st->cr();
 }
 
+#endif //PRODUCT
 
 void objArrayKlassKlass::oop_print_value_on(oop obj, outputStream* st) {
   assert(obj->is_klass(), "must be klass");
@@ -287,8 +288,6 @@
   st->print("[]");
 }
 
-#endif
-
 const char* objArrayKlassKlass::internal_name() const {
   return "{object array class}";
 }
--- a/src/share/vm/oops/objArrayKlassKlass.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/objArrayKlassKlass.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -64,14 +64,13 @@
   // helpers
   static klassOop allocate_objArray_klass_impl(objArrayKlassKlassHandle this_oop, int n, KlassHandle element_klass, TRAPS);
 
-#ifndef PRODUCT
  public:
   // Printing
+  void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
   void oop_print_on(oop obj, outputStream* st);
-  void oop_print_value_on(oop obj, outputStream* st);
-#endif
+#endif //PRODUCT
 
- public:
   // Verification
   const char* internal_name() const;
   void oop_verify_on(oop obj, outputStream* st);
--- a/src/share/vm/oops/objArrayOop.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/objArrayOop.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -37,6 +37,32 @@
     return &((T*)base())[index];
   }
 
+private:
+  // Gives the size of an objArrayOop in HeapWords, minus the header
+  static int array_size(int length) {
+    const int OopsPerHeapWord = HeapWordSize/heapOopSize;
+    assert(OopsPerHeapWord >= 1 && (HeapWordSize % heapOopSize == 0),
+           "Else the following (new) computation would be in error");
+#ifdef ASSERT
+    // The old code is left in for sanity-checking; it'll
+    // go away pretty soon. XXX
+    // Without UseCompressedOops, this is simply:
+    // oop->length() * HeapWordsPerOop;
+    // With narrowOops, HeapWordsPerOop is 1/2, which is 0 as an integer.
+    // The oop elements are aligned up to wordSize
+    const int HeapWordsPerOop = heapOopSize/HeapWordSize;
+    int old_res;
+    if (HeapWordsPerOop > 0) {
+      old_res = length * HeapWordsPerOop;
+    } else {
+      old_res = align_size_up(length, OopsPerHeapWord)/OopsPerHeapWord;
+    }
+#endif  // ASSERT
+    int res = ((uint)length + OopsPerHeapWord - 1)/OopsPerHeapWord;
+    assert(res == old_res, "Inconsistency between old and new.");
+    return res;
+  }
+
  public:
   // Returns the offset of the first element.
   static int base_offset_in_bytes() {
@@ -67,27 +93,14 @@
   // Sizing
   static int header_size()    { return arrayOopDesc::header_size(T_OBJECT); }
   int object_size()           { return object_size(length()); }
-  int array_size()            { return array_size(length()); }
 
   static int object_size(int length) {
     // This returns the object size in HeapWords.
-    return align_object_size(header_size() + array_size(length));
-  }
-
-  // Give size of objArrayOop in HeapWords minus the header
-  static int array_size(int length) {
-    // Without UseCompressedOops, this is simply:
-    // oop->length() * HeapWordsPerOop;
-    // With narrowOops, HeapWordsPerOop is 1/2 or equal 0 as an integer.
-    // The oop elements are aligned up to wordSize
-    const int HeapWordsPerOop = heapOopSize/HeapWordSize;
-    if (HeapWordsPerOop > 0) {
-      return length * HeapWordsPerOop;
-    } else {
-      const int OopsPerHeapWord = HeapWordSize/heapOopSize;
-      int word_len = align_size_up(length, OopsPerHeapWord)/OopsPerHeapWord;
-      return word_len;
-    }
+    uint asz = array_size(length);
+    uint osz = align_object_size(header_size() + asz);
+    assert(osz >= asz,   "no overflow");
+    assert((int)osz > 0, "no overflow");
+    return (int)osz;
   }
 
   // special iterators for index ranges, returns size of object
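The consolidated array_size computes the heap words needed for 'length' oop
elements with a single round-up expression, covering both the uncompressed
case (one or more words per oop) and the compressed case (several oops per
word). A self-checking sketch assuming a 64-bit VM with compressed oops:

    #include <cassert>

    static const int HeapWordSize = 8;   // bytes per heap word (assumed)
    static const int heapOopSize  = 4;   // bytes per compressed oop (assumed)

    static int array_size(int length) {
      const int OopsPerHeapWord = HeapWordSize / heapOopSize;   // 2 here
      // Round up: 5 compressed oops still occupy 3 full heap words.
      return (int)(((unsigned)length + OopsPerHeapWord - 1) / OopsPerHeapWord);
    }

    int main() {
      assert(array_size(0) == 0);
      assert(array_size(1) == 1);
      assert(array_size(5) == 3);
      assert(array_size(6) == 3);
      return 0;
    }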
--- a/src/share/vm/oops/oop.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/oop.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -31,14 +31,13 @@
 
 #ifdef PRODUCT
 void oopDesc::print_on(outputStream* st) const {}
-void oopDesc::print_value_on(outputStream* st) const {}
 void oopDesc::print_address_on(outputStream* st) const {}
-char* oopDesc::print_value_string() { return NULL; }
 char* oopDesc::print_string() { return NULL; }
 void oopDesc::print()         {}
-void oopDesc::print_value()   {}
 void oopDesc::print_address() {}
-#else
+
+#else //PRODUCT
+
 void oopDesc::print_on(outputStream* st) const {
   if (this == NULL) {
     st->print_cr("NULL");
@@ -47,22 +46,6 @@
   }
 }
 
-void oopDesc::print_value_on(outputStream* st) const {
-  oop obj = oop(this);
-  if (this == NULL) {
-    st->print("NULL");
-  } else if (java_lang_String::is_instance(obj)) {
-    java_lang_String::print(obj, st);
-    if (PrintOopAddress) print_address_on(st);
-#ifdef ASSERT
-  } else if (!Universe::heap()->is_in(obj) || !Universe::heap()->is_in(klass())) {
-    st->print("### BAD OOP %p ###", (address)obj);
-#endif
-  } else {
-    blueprint()->oop_print_value_on(obj, st);
-  }
-}
-
 void oopDesc::print_address_on(outputStream* st) const {
   if (PrintOopAddress) {
     st->print("{"INTPTR_FORMAT"}", this);
@@ -71,23 +54,47 @@
 
 void oopDesc::print()         { print_on(tty);         }
 
-void oopDesc::print_value()   { print_value_on(tty);   }
-
 void oopDesc::print_address() { print_address_on(tty); }
 
 char* oopDesc::print_string() {
-  stringStream* st = new stringStream();
-  print_on(st);
-  return st->as_string();
+  stringStream st;
+  print_on(&st);
+  return st.as_string();
+}
+
+#endif // PRODUCT
+
+// The print_value functions are present in all builds, to support the disassembler.
+
+void oopDesc::print_value() {
+  print_value_on(tty);
 }
 
 char* oopDesc::print_value_string() {
-  stringStream* st = new stringStream();
-  print_value_on(st);
-  return st->as_string();
+  char buf[100];
+  stringStream st(buf, sizeof(buf));
+  print_value_on(&st);
+  return st.as_string();
 }
 
-#endif // PRODUCT
+void oopDesc::print_value_on(outputStream* st) const {
+  oop obj = oop(this);
+  if (this == NULL) {
+    st->print("NULL");
+  } else if (java_lang_String::is_instance(obj)) {
+    java_lang_String::print(obj, st);
+#ifndef PRODUCT
+    if (PrintOopAddress) print_address_on(st);
+#endif //PRODUCT
+#ifdef ASSERT
+  } else if (!Universe::heap()->is_in(obj) || !Universe::heap()->is_in(klass())) {
+    st->print("### BAD OOP %p ###", (address)obj);
+#endif //ASSERT
+  } else {
+    blueprint()->oop_print_value_on(obj, st);
+  }
+}
+
 
 void oopDesc::verify_on(outputStream* st) {
   if (this != NULL) {
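print_value_string now streams into a small on-stack buffer: value printing
is short and bounded, so a fixed buffer avoids growing a heap-backed stream
mid-description, which matters in fragile callers such as the disassembler.
A hedged sketch of such a fixed-capacity stream (FixedStream is
illustrative, not HotSpot's stringStream):

    #include <cstdio>
    #include <cstring>

    struct FixedStream {
      char*  buf;
      size_t cap;
      size_t len;
      FixedStream(char* b, size_t c) : buf(b), cap(c), len(0) { buf[0] = '\0'; }
      void print(const char* s) {
        size_t n = std::strlen(s);
        if (n > cap - 1 - len) n = cap - 1 - len;   // truncate, never overflow
        std::memcpy(buf + len, s, n);
        len += n;
        buf[len] = '\0';
      }
    };

    int main() {
      char buf[100];               // mirrors the 100-byte buffer in the change
      FixedStream st(buf, sizeof(buf));
      st.print("a java.lang.String");
      st.print(" = \"hello\"");
      std::puts(buf);
      return 0;
    }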
--- a/src/share/vm/oops/oop.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/oop.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -30,13 +30,12 @@
 // no virtual functions allowed
 
 // store into oop with store check
-template <class T> void oop_store(T* p, oop v);
-template <class T> void oop_store(volatile T* p, oop v);
+template <class T> inline void oop_store(T* p, oop v);
+template <class T> inline void oop_store(volatile T* p, oop v);
 
 // store into oop without store check
-template <class T> void oop_store_without_check(T* p, oop v);
-template <class T> void oop_store_without_check(volatile T* p, oop v);
-
+template <class T> inline void oop_store_without_check(T* p, oop v);
+template <class T> inline void oop_store_without_check(volatile T* p, oop v);
 
 extern bool always_do_update_barrier;
 
--- a/src/share/vm/oops/symbolKlass.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/symbolKlass.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -213,6 +213,8 @@
   st->print("'");
 }
 
+#endif //PRODUCT
+
 void symbolKlass::oop_print_value_on(oop obj, outputStream* st) {
   symbolOop sym = symbolOop(obj);
   st->print("'");
@@ -222,8 +224,6 @@
   st->print("'");
 }
 
-#endif //PRODUCT
-
 const char* symbolKlass::internal_name() const {
   return "{symbol}";
 }
--- a/src/share/vm/oops/symbolKlass.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/symbolKlass.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -65,10 +65,10 @@
   int  oop_oop_iterate(oop obj, OopClosure* blk);
   int  oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
 
-#ifndef PRODUCT
   // Printing
   void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
   void oop_print_on(oop obj, outputStream* st);
-#endif
+#endif //PRODUCT
   const char* internal_name() const;
 };
--- a/src/share/vm/oops/symbolOop.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/symbolOop.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,11 @@
 # include "incls/_precompiled.incl"
 # include "incls/_symbolOop.cpp.incl"
 
+
+// ------------------------------------------------------------------
+// symbolOopDesc::equals
+//
+// Compares the symbol with a string of the given length.
 bool symbolOopDesc::equals(const char* str, int len) const {
   int l = utf8_length();
   if (l != len) return false;
@@ -36,6 +41,48 @@
   return true;
 }
 
+
+// ------------------------------------------------------------------
+// symbolOopDesc::starts_with
+//
+// Tests if the symbol starts with the specified prefix of the given
+// length.
+bool symbolOopDesc::starts_with(const char* prefix, int len) const {
+  if (len > utf8_length()) return false;
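+  // Compare the prefix back-to-front; any mismatch fails immediately.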
+  while (len-- > 0) {
+    if (prefix[len] != (char) byte_at(len))
+      return false;
+  }
+  assert(len == -1, "we should be at the beginning");
+  return true;
+}
+
+
+// ------------------------------------------------------------------
+// symbolOopDesc::index_of_at
+//
+// Determines whether the given string is a substring of this symbol's
+// utf8 bytes.  Returns -1 on failure; otherwise returns the first index
+// at which str occurs.
+int symbolOopDesc::index_of_at(int i, const char* str, int len) const {
+  assert(i >= 0 && i <= utf8_length(), "oob");
+  if (len <= 0)  return 0;
+  char first_char = str[0];
+  address bytes = (address) ((symbolOopDesc*)this)->base();
+  address limit = bytes + utf8_length() - len;  // inclusive limit
+  address scan = bytes + i;
+  if (scan > limit)
+    return -1;
+  for (;;) {
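+    // memchr jumps to the next candidate occurrence of the first
+    // character; memcmp then verifies the full match.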
+    scan = (address) memchr(scan, first_char, (limit + 1 - scan));
+    if (scan == NULL)
+      return -1;  // not found
+    assert(scan >= bytes+i && scan <= limit, "scan oob");
+    if (memcmp(scan, str, len) == 0)
+      return (int)(scan - bytes);
+  }
+}
+
+
 char* symbolOopDesc::as_C_string(char* buf, int size) const {
   if (size > 0) {
     int len = MIN2(size - 1, utf8_length());
--- a/src/share/vm/oops/symbolOop.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/symbolOop.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -70,8 +70,21 @@
 
   void set_utf8_length(int len) { _length = len; }
 
-  // Compares the symbol with a string
+  // Compares the symbol with a string.
   bool equals(const char* str, int len) const;
+  bool equals(const char* str) const { return equals(str, (int) strlen(str)); }
+
+  // Tests if the symbol starts with the given prefix.
+  bool starts_with(const char* prefix, int len) const;
+  bool starts_with(const char* prefix) const {
+    return starts_with(prefix, (int) strlen(prefix));
+  }
+
+  // Finds the first index >= i at which str occurs in this symbol's
+  // utf8 bytes; returns -1 if str does not occur.
+  int index_of_at(int i, const char* str, int len) const;
+  int index_of_at(int i, const char* str) const {
+    return index_of_at(i, str, (int) strlen(str));
+  }
 
   // Three-way compare for sorting; returns -1/0/1 if receiver is </==/> than arg
  // note that the ordering is not alphabetical
--- a/src/share/vm/oops/typeArrayKlassKlass.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/typeArrayKlassKlass.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -45,6 +45,7 @@
   Klass:: oop_print_on(obj, st);
 }
 
+#endif //PRODUCT
 
 void typeArrayKlassKlass::oop_print_value_on(oop obj, outputStream* st) {
   assert(obj->is_klass(), "must be klass");
@@ -63,8 +64,6 @@
   st->print("}");
 }
 
-#endif
-
 const char* typeArrayKlassKlass::internal_name() const {
   return "{type array class}";
 }
--- a/src/share/vm/oops/typeArrayKlassKlass.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/oops/typeArrayKlassKlass.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -47,12 +47,12 @@
   static int header_size() { return oopDesc::header_size() + sizeof(typeArrayKlassKlass)/HeapWordSize; }
   int object_size() const  { return align_object_size(header_size()); }
 
-#ifndef PRODUCT
  public:
   // Printing
+  void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
   void oop_print_on(oop obj, outputStream* st);
-  void oop_print_value_on(oop obj, outputStream* st);
-#endif
- public:
+#endif //PRODUCT
+
   const char* internal_name() const;
 };
--- a/src/share/vm/opto/bytecodeInfo.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/bytecodeInfo.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -27,11 +27,16 @@
 
 //=============================================================================
 //------------------------------InlineTree-------------------------------------
-InlineTree::InlineTree( Compile* c, const InlineTree *caller_tree, ciMethod* callee, JVMState* caller_jvms, int caller_bci, float site_invoke_ratio )
+InlineTree::InlineTree( Compile* c,
+                        const InlineTree *caller_tree, ciMethod* callee,
+                        JVMState* caller_jvms, int caller_bci,
+                        float site_invoke_ratio, int site_depth_adjust)
 : C(c), _caller_jvms(caller_jvms),
   _caller_tree((InlineTree*)caller_tree),
   _method(callee), _site_invoke_ratio(site_invoke_ratio),
-  _count_inline_bcs(method()->code_size()) {
+  _site_depth_adjust(site_depth_adjust),
+  _count_inline_bcs(method()->code_size())
+{
   NOT_PRODUCT(_count_inlines = 0;)
   if (_caller_jvms != NULL) {
     // Keep a private copy of the caller_jvms:
@@ -40,7 +45,7 @@
     assert(!caller_jvms->should_reexecute(), "there should be no reexecute bytecode with inlining");
   }
   assert(_caller_jvms->same_calls_as(caller_jvms), "consistent JVMS");
-  assert((caller_tree == NULL ? 0 : caller_tree->inline_depth() + 1) == inline_depth(), "correct (redundant) depth parameter");
+  assert((caller_tree == NULL ? 0 : caller_tree->stack_depth() + 1) == stack_depth(), "correct (redundant) depth parameter");
   assert(caller_bci == this->caller_bci(), "correct (redundant) bci parameter");
   if (UseOldInlining) {
     // Update hierarchical counts, count_inline_bcs() and count_inlines()
@@ -52,10 +57,13 @@
   }
 }
 
-InlineTree::InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio)
+InlineTree::InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms,
+                       float site_invoke_ratio, int site_depth_adjust)
 : C(c), _caller_jvms(caller_jvms), _caller_tree(NULL),
   _method(callee_method), _site_invoke_ratio(site_invoke_ratio),
-  _count_inline_bcs(method()->code_size()) {
+  _site_depth_adjust(site_depth_adjust),
+  _count_inline_bcs(method()->code_size())
+{
   NOT_PRODUCT(_count_inlines = 0;)
   assert(!UseOldInlining, "do not use for old stuff");
 }
@@ -180,6 +188,10 @@
     return NULL;
   }
 
+  // Always inline MethodHandle methods.
+  if (callee_method->is_method_handle_invoke())
+    return NULL;
+
   // First check all inlining restrictions which are required for correctness
   if (callee_method->is_abstract())               return "abstract method";
   // note: we allow ik->is_abstract()
@@ -265,10 +277,13 @@
     return msg;
   }
 
-  bool is_accessor = InlineAccessors && callee_method->is_accessor();
+  if (InlineAccessors && callee_method->is_accessor()) {
+    // accessor methods are not subject to any of the following limits.
+    return NULL;
+  }
 
   // suppress a few checks for accessors and trivial methods
-  if (!is_accessor && callee_method->code_size() > MaxTrivialSize) {
+  if (callee_method->code_size() > MaxTrivialSize) {
 
     // don't inline into giant methods
     if (C->unique() > (uint)NodeCountInliningCutoff) {
@@ -287,7 +302,7 @@
     }
   }
 
-  if (!C->do_inlining() && InlineAccessors && !is_accessor) {
+  if (!C->do_inlining() && InlineAccessors) {
     return "not an accessor";
   }
   if( inline_depth() > MaxInlineLevel ) {
@@ -322,14 +337,17 @@
     // stricter than callee_holder->is_initialized()
     ciBytecodeStream iter(caller_method);
     iter.force_bci(caller_bci);
-    int index = iter.get_index_int();
-    if( !caller_method->is_klass_loaded(index, true) ) {
-      return false;
-    }
-    // Try to do constant pool resolution if running Xcomp
     Bytecodes::Code call_bc = iter.cur_bc();
-    if( !caller_method->check_call(index, call_bc == Bytecodes::_invokestatic) ) {
-      return false;
+    // An invokedynamic instruction does not have a klass.
+    if (call_bc != Bytecodes::_invokedynamic) {
+      int index = iter.get_index_int();
+      if (!caller_method->is_klass_loaded(index, true)) {
+        return false;
+      }
+      // Try to do constant pool resolution if running Xcomp
+      if( !caller_method->check_call(index, call_bc == Bytecodes::_invokestatic) ) {
+        return false;
+      }
     }
   }
   // We will attempt to see if a class/field/etc got properly loaded.  If it
@@ -457,7 +475,30 @@
   if (old_ilt != NULL) {
     return old_ilt;
   }
-  InlineTree *ilt = new InlineTree( C, this, callee_method, caller_jvms, caller_bci, recur_frequency );
+  int new_depth_adjust = 0;
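+  // MethodHandle and invokedynamic adapter frames are implementation
+  // artifacts; discount them so they do not consume inline depth budget.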
+  if (caller_jvms->method() != NULL) {
+    if ((caller_jvms->method()->name() == ciSymbol::invoke_name() &&
+         caller_jvms->method()->holder()->name() == ciSymbol::java_dyn_MethodHandle())
+        || caller_jvms->method()->holder()->name() == ciSymbol::java_dyn_InvokeDynamic())
+      /* @@@ FIXME:
+    if (caller_jvms->method()->is_method_handle_adapter())
+      */
+      new_depth_adjust -= 1;  // don't count actions in MH or indy adapter frames
+    else if (callee_method->is_method_handle_invoke()) {
+      new_depth_adjust -= 1;  // don't count method handle calls from the java.dyn implementation
+    }
+    if (new_depth_adjust != 0 && PrintInlining) {
+      stringStream nm1; caller_jvms->method()->print_name(&nm1);
+      stringStream nm2; callee_method->print_name(&nm2);
+      tty->print_cr("discounting inlining depth from %s to %s", nm1.base(), nm2.base());
+    }
+    if (new_depth_adjust != 0 && C->log()) {
+      int id1 = C->log()->identify(caller_jvms->method());
+      int id2 = C->log()->identify(callee_method);
+      C->log()->elem("inline_depth_discount caller='%d' callee='%d'", id1, id2);
+    }
+  }
+  InlineTree *ilt = new InlineTree(C, this, callee_method, caller_jvms, caller_bci, recur_frequency, _site_depth_adjust + new_depth_adjust);
   _subtrees.append( ilt );
 
   NOT_PRODUCT( _count_inlines += 1; )
@@ -483,7 +524,7 @@
   Compile* C = Compile::current();
 
   // Root of inline tree
-  InlineTree *ilt = new InlineTree(C, NULL, C->method(), NULL, -1, 1.0F);
+  InlineTree *ilt = new InlineTree(C, NULL, C->method(), NULL, -1, 1.0F, 0);
 
   return ilt;
 }
--- a/src/share/vm/opto/c2_globals.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/c2_globals.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -25,4 +25,4 @@
 # include "incls/_precompiled.incl"
 # include "incls/_c2_globals.cpp.incl"
 
-C2_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
+C2_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_EXPERIMENTAL_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
--- a/src/share/vm/opto/c2_globals.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/c2_globals.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -26,7 +26,7 @@
 // Defines all globals flags used by the server compiler.
 //
 
-#define C2_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
+#define C2_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct) \
                                                                             \
   notproduct(intx, CompileZapFirst, 0,                                      \
           "If +ZapDeadCompiledLocals, "                                     \
@@ -154,6 +154,12 @@
   notproduct(bool, TraceProfileTripCount, false,                            \
           "Trace profile loop trip count information")                      \
                                                                             \
+  product(bool, UseLoopPredicate, true,                                     \
+          "Generate a predicate to select fast/slow loop versions")         \
+                                                                            \
+  develop(bool, TraceLoopPredicate, false,                                  \
+          "Trace generation of loop predicates")                            \
+                                                                            \
   develop(bool, OptoCoalesce, true,                                         \
           "Use Conservative Copy Coalescing in the Register Allocator")     \
                                                                             \
@@ -394,6 +400,12 @@
   product(bool, UseOptoBiasInlining, true,                                  \
           "Generate biased locking code in C2 ideal graph")                 \
                                                                             \
+  product(bool, OptimizeStringConcat, false,                                \
+          "Optimize the construction of Strings by StringBuilder")          \
+                                                                            \
+  notproduct(bool, PrintOptimizeStringConcat, false,                        \
+          "Print information about transformations performed on Strings")   \
+                                                                            \
   product(intx, ValueSearchLimit, 1000,                                     \
           "Recursion limit in PhaseMacroExpand::value_from_mem_phi")        \
                                                                             \
@@ -413,4 +425,4 @@
   product(bool, BlockLayoutRotateLoops, true,                               \
           "Allow back branches to be fall throughs in the block layour")    \
 
-C2_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG)
+C2_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG)
--- a/src/share/vm/opto/callGenerator.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/callGenerator.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -98,12 +98,21 @@
 //---------------------------DirectCallGenerator------------------------------
 // Internal class which handles all out-of-line calls w/o receiver type checks.
 class DirectCallGenerator : public CallGenerator {
-public:
-  DirectCallGenerator(ciMethod* method)
-    : CallGenerator(method)
+ private:
+  CallStaticJavaNode* _call_node;
+  // Force separate memory and I/O projections for the exceptional
+  // paths to facilitate late inlining.
+  bool                _separate_io_proj;
+
+ public:
+  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
+    : CallGenerator(method),
+      _separate_io_proj(separate_io_proj)
   {
   }
   virtual JVMState* generate(JVMState* jvms);
+
+  CallStaticJavaNode* call_node() const { return _call_node; }
 };
 
 JVMState* DirectCallGenerator::generate(JVMState* jvms) {
@@ -127,14 +136,85 @@
     }
     // Mark the call node as virtual, sort of:
     call->set_optimized_virtual(true);
+    if (method()->is_method_handle_invoke()) {
+      call->set_method_handle_invoke(true);
+      kit.C->set_has_method_handle_invokes(true);
+    }
   }
   kit.set_arguments_for_java_call(call);
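+  // With _separate_io_proj set, the exceptional memory/i_o projections
+  // stay distinct so late inlining can rewire them independently.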
+  kit.set_edges_for_java_call(call, false, _separate_io_proj);
+  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
+  kit.push_node(method()->return_type()->basic_type(), ret);
+  _call_node = call;  // Save the call node in case we need it later
+  return kit.transfer_exceptions_into_jvms();
+}
+
+//---------------------------DynamicCallGenerator-----------------------------
+// Internal class which handles all out-of-line invokedynamic calls.
+class DynamicCallGenerator : public CallGenerator {
+public:
+  DynamicCallGenerator(ciMethod* method)
+    : CallGenerator(method)
+  {
+  }
+  virtual JVMState* generate(JVMState* jvms);
+};
+
+JVMState* DynamicCallGenerator::generate(JVMState* jvms) {
+  GraphKit kit(jvms);
+
+  if (kit.C->log() != NULL) {
+    kit.C->log()->elem("dynamic_call bci='%d'", jvms->bci());
+  }
+
+  // Get the constant pool cache from the caller class.
+  ciMethod* caller_method = jvms->method();
+  ciBytecodeStream str(caller_method);
+  str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
+  assert(str.cur_bc() == Bytecodes::_invokedynamic, "wrong place to issue a dynamic call!");
+  ciCPCache* cpcache = str.get_cpcache();
+
+  // Get the offset of the CallSite from the constant pool cache
+  // pointer.
+  int index = str.get_method_index();
+  size_t call_site_offset = cpcache->get_f1_offset(index);
+
+  // Load the CallSite object from the constant pool cache.
+  const TypeOopPtr* cpcache_ptr = TypeOopPtr::make_from_constant(cpcache);
+  Node* cpcache_adr = kit.makecon(cpcache_ptr);
+  Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, cpcache_adr, call_site_offset);
+  Node* call_site = kit.make_load(kit.control(), call_site_adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw);
+
+  // Load the target MethodHandle from the CallSite object.
+  Node* target_mh_adr = kit.basic_plus_adr(call_site, call_site, java_dyn_CallSite::target_offset_in_bytes());
+  Node* target_mh = kit.make_load(kit.control(), target_mh_adr, TypeInstPtr::BOTTOM, T_OBJECT);
+
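+  // The call initially targets the resolve stub; the runtime patches in
+  // the real entry point on first invocation.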
+  address resolve_stub = SharedRuntime::get_resolve_opt_virtual_call_stub();
+
+  CallStaticJavaNode *call = new (kit.C, tf()->domain()->cnt()) CallStaticJavaNode(tf(), resolve_stub, method(), kit.bci());
+  // invokedynamic is treated as an optimized invokevirtual.
+  call->set_optimized_virtual(true);
+  // Take extra care (in the presence of argument motion) not to trash the SP:
+  call->set_method_handle_invoke(true);
+  kit.C->set_has_method_handle_invokes(true);
+
+  // Pass the target MethodHandle as first argument and shift the
+  // other arguments.
+  call->init_req(0 + TypeFunc::Parms, target_mh);
+  uint nargs = call->method()->arg_size();
+  for (uint i = 1; i < nargs; i++) {
+    Node* arg = kit.argument(i - 1);
+    call->init_req(i + TypeFunc::Parms, arg);
+  }
+
   kit.set_edges_for_java_call(call);
   Node* ret = kit.set_results_for_java_call(call);
   kit.push_node(method()->return_type()->basic_type(), ret);
   return kit.transfer_exceptions_into_jvms();
 }
 
+//--------------------------VirtualCallGenerator------------------------------
+// Internal class which handles all out-of-line calls checking receiver type.
 class VirtualCallGenerator : public CallGenerator {
 private:
   int _vtable_index;
@@ -149,8 +229,6 @@
   virtual JVMState* generate(JVMState* jvms);
 };
 
-//--------------------------VirtualCallGenerator------------------------------
-// Internal class which handles all out-of-line calls checking receiver type.
 JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
   GraphKit kit(jvms);
   Node* receiver = kit.argument(0);
@@ -238,16 +316,124 @@
   return new ParseGenerator(m, expected_uses, true);
 }
 
-CallGenerator* CallGenerator::for_direct_call(ciMethod* m) {
+CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
   assert(!m->is_abstract(), "for_direct_call mismatch");
-  return new DirectCallGenerator(m);
+  return new DirectCallGenerator(m, separate_io_proj);
+}
+
+CallGenerator* CallGenerator::for_dynamic_call(ciMethod* m) {
+  assert(m->is_method_handle_invoke(), "for_dynamic_call mismatch");
+  return new DynamicCallGenerator(m);
 }
 
 CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
   assert(!m->is_static(), "for_virtual_call mismatch");
+  assert(!m->is_method_handle_invoke(), "should be a direct call");
   return new VirtualCallGenerator(m, vtable_index);
 }
 
+// Allow inlining decisions to be delayed
+class LateInlineCallGenerator : public DirectCallGenerator {
+  CallGenerator* _inline_cg;
+
+ public:
+  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
+    DirectCallGenerator(method, true), _inline_cg(inline_cg) {}
+
+  virtual bool      is_late_inline() const { return true; }
+
+  // Convert the CallStaticJava into an inline
+  virtual void do_late_inline();
+
+  JVMState* generate(JVMState* jvms) {
+    // Record that this call site should be revisited once the main
+    // parse is finished.
+    Compile::current()->add_late_inline(this);
+
+    // Emit the CallStaticJava and request separate projections so
+    // that the late inlining logic can distinguish between fall
+    // through and exceptional uses of the memory and io projections
+    // as is done for allocations and macro expansion.
+    return DirectCallGenerator::generate(jvms);
+  }
+
+};
+
+
+void LateInlineCallGenerator::do_late_inline() {
+  // Can't inline it
+  if (call_node() == NULL || call_node()->outcnt() == 0 ||
+      call_node()->in(0) == NULL || call_node()->in(0)->is_top())
+    return;
+
+  CallStaticJavaNode* call = call_node();
+
+  // Make a clone of the JVMState that is appropriate for driving a parse
+  Compile* C = Compile::current();
+  JVMState* jvms     = call->jvms()->clone_shallow(C);
+  uint size = call->req();
+  SafePointNode* map = new (C, size) SafePointNode(size, jvms);
+  for (uint i1 = 0; i1 < size; i1++) {
+    map->init_req(i1, call->in(i1));
+  }
+
+  // Make sure the state is a MergeMem for parsing.
+  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
+    map->set_req(TypeFunc::Memory, MergeMemNode::make(C, map->in(TypeFunc::Memory)));
+  }
+
+  // Make enough space for the expression stack and transfer the incoming arguments
+  int nargs    = method()->arg_size();
+  jvms->set_map(map);
+  map->ensure_stack(jvms, jvms->method()->max_stack());
+  if (nargs > 0) {
+    for (int i1 = 0; i1 < nargs; i1++) {
+      map->set_req(i1 + jvms->argoff(), call->in(TypeFunc::Parms + i1));
+    }
+  }
+
+  CompileLog* log = C->log();
+  if (log != NULL) {
+    log->head("late_inline method='%d'", log->identify(method()));
+    JVMState* p = jvms;
+    while (p != NULL) {
+      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
+      p = p->caller();
+    }
+    log->tail("late_inline");
+  }
+
+  // Setup default node notes to be picked up by the inlining
+  Node_Notes* old_nn = C->default_node_notes();
+  if (old_nn != NULL) {
+    Node_Notes* entry_nn = old_nn->clone(C);
+    entry_nn->set_jvms(jvms);
+    C->set_default_node_notes(entry_nn);
+  }
+
+  // Now perform the inlining using the synthesized JVMState
+  JVMState* new_jvms = _inline_cg->generate(jvms);
+  if (new_jvms == NULL)  return;  // no change
+  if (C->failing())      return;
+
+  // Capture any exceptional control flow
+  GraphKit kit(new_jvms);
+
+  // Find the result object
+  Node* result = C->top();
+  int   result_size = method()->return_type()->size();
+  if (result_size != 0 && !kit.stopped()) {
+    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
+  }
+
+  kit.replace_call(call, result);
+}
+
+
+CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
+  return new LateInlineCallGenerator(method, inline_cg);
+}
+
 
 //---------------------------WarmCallGenerator--------------------------------
 // Internal class which handles initial deferral of inlining decisions.
@@ -315,70 +501,7 @@
 }
 
 void WarmCallInfo::make_hot() {
-  Compile* C = Compile::current();
-  // Replace the callnode with something better.
-  CallJavaNode* call = this->call()->as_CallJava();
-  ciMethod* method   = call->method();
-  int       nargs    = method->arg_size();
-  JVMState* jvms     = call->jvms()->clone_shallow(C);
-  uint size = TypeFunc::Parms + MAX2(2, nargs);
-  SafePointNode* map = new (C, size) SafePointNode(size, jvms);
-  for (uint i1 = 0; i1 < (uint)(TypeFunc::Parms + nargs); i1++) {
-    map->init_req(i1, call->in(i1));
-  }
-  jvms->set_map(map);
-  jvms->set_offsets(map->req());
-  jvms->set_locoff(TypeFunc::Parms);
-  jvms->set_stkoff(TypeFunc::Parms);
-  GraphKit kit(jvms);
-
-  JVMState* new_jvms = _hot_cg->generate(kit.jvms());
-  if (new_jvms == NULL)  return;  // no change
-  if (C->failing())      return;
-
-  kit.set_jvms(new_jvms);
-  Node* res = C->top();
-  int   res_size = method->return_type()->size();
-  if (res_size != 0) {
-    kit.inc_sp(-res_size);
-    res = kit.argument(0);
-  }
-  GraphKit ekit(kit.combine_and_pop_all_exception_states()->jvms());
-
-  // Replace the call:
-  for (DUIterator i = call->outs(); call->has_out(i); i++) {
-    Node* n = call->out(i);
-    Node* nn = NULL;  // replacement
-    if (n->is_Proj()) {
-      ProjNode* nproj = n->as_Proj();
-      assert(nproj->_con < (uint)(TypeFunc::Parms + (res_size ? 1 : 0)), "sane proj");
-      if (nproj->_con == TypeFunc::Parms) {
-        nn = res;
-      } else {
-        nn = kit.map()->in(nproj->_con);
-      }
-      if (nproj->_con == TypeFunc::I_O) {
-        for (DUIterator j = nproj->outs(); nproj->has_out(j); j++) {
-          Node* e = nproj->out(j);
-          if (e->Opcode() == Op_CreateEx) {
-            e->replace_by(ekit.argument(0));
-          } else if (e->Opcode() == Op_Catch) {
-            for (DUIterator k = e->outs(); e->has_out(k); k++) {
-              CatchProjNode* p = e->out(j)->as_CatchProj();
-              if (p->is_handler_proj()) {
-                p->replace_by(ekit.control());
-              } else {
-                p->replace_by(kit.control());
-              }
-            }
-          }
-        }
-      }
-    }
-    NOT_PRODUCT(if (!nn)  n->dump(2));
-    assert(nn != NULL, "don't know what to do with this user");
-    n->replace_by(nn);
-  }
+  Unimplemented();
 }
 
 void WarmCallInfo::make_cold() {
@@ -527,6 +650,155 @@
 }
 
 
+//------------------------PredictedDynamicCallGenerator-----------------------
+// Internal class which handles out-of-line invokedynamic calls that
+// check the call site's target MethodHandle against a prediction.
+class PredictedDynamicCallGenerator : public CallGenerator {
+  ciMethodHandle* _predicted_method_handle;
+  CallGenerator*  _if_missed;
+  CallGenerator*  _if_hit;
+  float           _hit_prob;
+
+public:
+  PredictedDynamicCallGenerator(ciMethodHandle* predicted_method_handle,
+                                CallGenerator* if_missed,
+                                CallGenerator* if_hit,
+                                float hit_prob)
+    : CallGenerator(if_missed->method()),
+      _predicted_method_handle(predicted_method_handle),
+      _if_missed(if_missed),
+      _if_hit(if_hit),
+      _hit_prob(hit_prob)
+  {}
+
+  virtual bool is_inline()   const { return _if_hit->is_inline(); }
+  virtual bool is_deferred() const { return _if_hit->is_deferred(); }
+
+  virtual JVMState* generate(JVMState* jvms);
+};
+
+
+CallGenerator* CallGenerator::for_predicted_dynamic_call(ciMethodHandle* predicted_method_handle,
+                                                         CallGenerator* if_missed,
+                                                         CallGenerator* if_hit,
+                                                         float hit_prob) {
+  return new PredictedDynamicCallGenerator(predicted_method_handle, if_missed, if_hit, hit_prob);
+}
+
+
+JVMState* PredictedDynamicCallGenerator::generate(JVMState* jvms) {
+  GraphKit kit(jvms);
+  PhaseGVN& gvn = kit.gvn();
+
+  CompileLog* log = kit.C->log();
+  if (log != NULL) {
+    log->elem("predicted_dynamic_call bci='%d'", jvms->bci());
+  }
+
+  // Get the constant pool cache from the caller class.
+  ciMethod* caller_method = jvms->method();
+  ciBytecodeStream str(caller_method);
+  str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
+  ciCPCache* cpcache = str.get_cpcache();
+
+  // Get the offset of the CallSite from the constant pool cache
+  // pointer.
+  int index = str.get_method_index();
+  size_t call_site_offset = cpcache->get_f1_offset(index);
+
+  // Load the CallSite object from the constant pool cache.
+  const TypeOopPtr* cpcache_ptr = TypeOopPtr::make_from_constant(cpcache);
+  Node* cpcache_adr   = kit.makecon(cpcache_ptr);
+  Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, cpcache_adr, call_site_offset);
+  Node* call_site     = kit.make_load(kit.control(), call_site_adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw);
+
+  // Load the target MethodHandle from the CallSite object.
+  Node* target_adr = kit.basic_plus_adr(call_site, call_site, java_dyn_CallSite::target_offset_in_bytes());
+  Node* target_mh  = kit.make_load(kit.control(), target_adr, TypeInstPtr::BOTTOM, T_OBJECT);
+
+  // Check if the MethodHandle is still the same.
+  const TypeOopPtr* predicted_mh_ptr = TypeOopPtr::make_from_constant(_predicted_method_handle, true);
+  Node* predicted_mh = kit.makecon(predicted_mh_ptr);
+
+  Node* cmp = gvn.transform(new(kit.C, 3) CmpPNode(target_mh, predicted_mh));
+  Node* bol = gvn.transform(new(kit.C, 2) BoolNode(cmp, BoolTest::eq) );
+  IfNode* iff = kit.create_and_xform_if(kit.control(), bol, _hit_prob, COUNT_UNKNOWN);
+  kit.set_control( gvn.transform(new(kit.C, 1) IfTrueNode (iff)));
+  Node* slow_ctl = gvn.transform(new(kit.C, 1) IfFalseNode(iff));
+
+  SafePointNode* slow_map = NULL;
+  JVMState* slow_jvms;
+  { PreserveJVMState pjvms(&kit);
+    kit.set_control(slow_ctl);
+    if (!kit.stopped()) {
+      slow_jvms = _if_missed->generate(kit.sync_jvms());
+      assert(slow_jvms != NULL, "miss path must not fail to generate");
+      kit.add_exception_states_from(slow_jvms);
+      kit.set_map(slow_jvms->map());
+      if (!kit.stopped())
+        slow_map = kit.stop();
+    }
+  }
+
+  if (kit.stopped()) {
+    // The fast path is dead: the target can never match the predicted MethodHandle.
+    kit.set_jvms(slow_jvms);
+    return kit.transfer_exceptions_into_jvms();
+  }
+
+  // Make the hot call:
+  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
+  if (new_jvms == NULL) {
+    // Inline failed, so make a direct call.
+    assert(_if_hit->is_inline(), "must have been a failed inline");
+    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
+    new_jvms = cg->generate(kit.sync_jvms());
+  }
+  kit.add_exception_states_from(new_jvms);
+  kit.set_jvms(new_jvms);
+
+  // Need to merge slow and fast?
+  if (slow_map == NULL) {
+    // The fast path is the only path remaining.
+    return kit.transfer_exceptions_into_jvms();
+  }
+
+  if (kit.stopped()) {
+    // Inlined method threw an exception, so it's just the slow path after all.
+    kit.set_jvms(slow_jvms);
+    return kit.transfer_exceptions_into_jvms();
+  }
+
+  // Finish the diamond.
+  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
+  RegionNode* region = new (kit.C, 3) RegionNode(3);
+  region->init_req(1, kit.control());
+  region->init_req(2, slow_map->control());
+  kit.set_control(gvn.transform(region));
+  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
+  iophi->set_req(2, slow_map->i_o());
+  kit.set_i_o(gvn.transform(iophi));
+  kit.merge_memory(slow_map->merged_memory(), region, 2);
+  uint tos = kit.jvms()->stkoff() + kit.sp();
+  uint limit = slow_map->req();
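+  // Walk the JVM state and phi together any values on which the fast
+  // and slow maps disagree.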
+  for (uint i = TypeFunc::Parms; i < limit; i++) {
+    // Skip unused stack slots; fast forward to monoff();
+    if (i == tos) {
+      i = kit.jvms()->monoff();
+      if( i >= limit ) break;
+    }
+    Node* m = kit.map()->in(i);
+    Node* n = slow_map->in(i);
+    if (m != n) {
+      const Type* t = gvn.type(m)->meet(gvn.type(n));
+      Node* phi = PhiNode::make(region, m, t);
+      phi->set_req(2, n);
+      kit.map()->set_req(i, gvn.transform(phi));
+    }
+  }
+  return kit.transfer_exceptions_into_jvms();
+}
+
+
 //-------------------------UncommonTrapCallGenerator-----------------------------
 // Internal class which handles all out-of-line calls checking receiver type.
 class UncommonTrapCallGenerator : public CallGenerator {
--- a/src/share/vm/opto/callGenerator.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/callGenerator.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -57,6 +57,13 @@
   // is_trap: Does not return to the caller.  (E.g., uncommon trap.)
   virtual bool      is_trap() const             { return false; }
 
+  // is_late_inline: supports conversion of call into an inline
+  virtual bool      is_late_inline() const      { return false; }
+  // Replace the call with an inline version of the code
+  virtual void do_late_inline() { ShouldNotReachHere(); }
+
+  virtual CallStaticJavaNode* call_node() const { ShouldNotReachHere(); return NULL; }
+
   // Note:  It is possible for a CG to be both inline and virtual.
   // (The hashCode intrinsic does a vtable check and an inlined fast path.)
 
@@ -92,9 +99,13 @@
   static CallGenerator* for_osr(ciMethod* m, int osr_bci);
 
   // How to generate vanilla out-of-line call sites:
-  static CallGenerator* for_direct_call(ciMethod* m);   // static, special
+  static CallGenerator* for_direct_call(ciMethod* m, bool separate_io_projs = false);   // static, special
+  static CallGenerator* for_dynamic_call(ciMethod* m);   // invokedynamic
   static CallGenerator* for_virtual_call(ciMethod* m, int vtable_index);  // virtual, interface
 
+  // How to generate a direct call that can later be replaced by an inline version:
+  static CallGenerator* for_late_inline(ciMethod* m, CallGenerator* inline_cg);
+
   // How to make a call but defer the decision whether to inline or not.
   static CallGenerator* for_warm_call(WarmCallInfo* ci,
                                       CallGenerator* if_cold,
@@ -106,6 +117,12 @@
                                            CallGenerator* if_hit,
                                            float hit_prob);
 
+  // How to make a call that optimistically assumes a MethodHandle target:
+  static CallGenerator* for_predicted_dynamic_call(ciMethodHandle* predicted_method_handle,
+                                                   CallGenerator* if_missed,
+                                                   CallGenerator* if_hit,
+                                                   float hit_prob);
+
   // How to make a call that gives up and goes back to the interpreter:
   static CallGenerator* for_uncommon_trap(ciMethod* m,
                                           Deoptimization::DeoptReason reason,
--- a/src/share/vm/opto/callnode.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/callnode.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -693,6 +693,84 @@
 }
 
 
+void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj) {
+  projs->fallthrough_proj      = NULL;
+  projs->fallthrough_catchproj = NULL;
+  projs->fallthrough_ioproj    = NULL;
+  projs->catchall_ioproj       = NULL;
+  projs->catchall_catchproj    = NULL;
+  projs->fallthrough_memproj   = NULL;
+  projs->catchall_memproj      = NULL;
+  projs->resproj               = NULL;
+  projs->exobj                 = NULL;
+
+  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
+    ProjNode *pn = fast_out(i)->as_Proj();
+    if (pn->outcnt() == 0) continue;
+    switch (pn->_con) {
+    case TypeFunc::Control:
+      {
+        // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
+        projs->fallthrough_proj = pn;
+        DUIterator_Fast jmax, j = pn->fast_outs(jmax);
+        const Node *cn = pn->fast_out(j);
+        if (cn->is_Catch()) {
+          ProjNode *cpn = NULL;
+          for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
+            cpn = cn->fast_out(k)->as_Proj();
+            assert(cpn->is_CatchProj(), "must be a CatchProjNode");
+            if (cpn->_con == CatchProjNode::fall_through_index)
+              projs->fallthrough_catchproj = cpn;
+            else {
+              assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
+              projs->catchall_catchproj = cpn;
+            }
+          }
+        }
+        break;
+      }
+    case TypeFunc::I_O:
+      if (pn->_is_io_use)
+        projs->catchall_ioproj = pn;
+      else
+        projs->fallthrough_ioproj = pn;
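+      // A CreateEx hanging off an i_o projection is the exception oop
+      // for the catch-all path.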
+      for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
+        Node* e = pn->out(j);
+        if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj()) {
+          assert(projs->exobj == NULL, "only one");
+          projs->exobj = e;
+        }
+      }
+      break;
+    case TypeFunc::Memory:
+      if (pn->_is_io_use)
+        projs->catchall_memproj = pn;
+      else
+        projs->fallthrough_memproj = pn;
+      break;
+    case TypeFunc::Parms:
+      projs->resproj = pn;
+      break;
+    default:
+      assert(false, "unexpected projection from allocation node.");
+    }
+  }
+
+  // The resproj may not exist because the result could be ignored
+  // and the exception object may not exist if an exception handler
+  // swallows the exception, but all the others must exist and be found.
+  assert(projs->fallthrough_proj      != NULL, "must be found");
+  assert(projs->fallthrough_catchproj != NULL, "must be found");
+  assert(projs->fallthrough_memproj   != NULL, "must be found");
+  assert(projs->fallthrough_ioproj    != NULL, "must be found");
+  assert(projs->catchall_catchproj    != NULL, "must be found");
+  if (separate_io_proj) {
+    assert(projs->catchall_memproj      != NULL, "must be found");
+    assert(projs->catchall_ioproj       != NULL, "must be found");
+  }
+}
+
+
 //=============================================================================
 uint CallJavaNode::size_of() const { return sizeof(*this); }
 uint CallJavaNode::cmp( const Node &n ) const {
--- a/src/share/vm/opto/callnode.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/callnode.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -470,6 +470,23 @@
 #endif
 };
 
+
+// Simple container for the outgoing projections of a call.  Useful
+// for serious surgery on calls.
+class CallProjections : public StackObj {
+public:
+  Node* fallthrough_proj;
+  Node* fallthrough_catchproj;
+  Node* fallthrough_memproj;
+  Node* fallthrough_ioproj;
+  Node* catchall_catchproj;
+  Node* catchall_memproj;
+  Node* catchall_ioproj;
+  Node* resproj;
+  Node* exobj;
+};
+
+
 //------------------------------CallNode---------------------------------------
 // Call nodes now subsume the function of debug nodes at callsites, so they
 // contain the functionality of a full scope chain of debug nodes.
@@ -521,6 +538,11 @@
   // or returns NULL if there is no one.
   Node *result_cast();
 
+  // Collect all the interesting edges from a call for use in
+  // replacing the call by something else.  Used by macro expansion
+  // and the late inlining support.
+  void extract_projections(CallProjections* projs, bool separate_io_proj);
+
   virtual uint match_edge(uint idx) const;
 
 #ifndef PRODUCT
@@ -529,6 +551,7 @@
 #endif
 };
 
+
 //------------------------------CallJavaNode-----------------------------------
 // Make a static or dynamic subroutine call node using Java calling
 // convention.  (The "Java" calling convention is the compiler's calling
@@ -539,12 +562,15 @@
   virtual uint size_of() const; // Size is bigger
 
   bool    _optimized_virtual;
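+  // True for invokedynamic/MethodHandle call sites; code generation
+  // must take care not to trash SP.  (JSR 292)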
+  bool    _method_handle_invoke;
   ciMethod* _method;            // Method being direct called
 public:
   const int       _bci;         // Byte Code Index of call byte code
   CallJavaNode(const TypeFunc* tf , address addr, ciMethod* method, int bci)
     : CallNode(tf, addr, TypePtr::BOTTOM),
-      _method(method), _bci(bci), _optimized_virtual(false)
+      _method(method), _bci(bci),
+      _optimized_virtual(false),
+      _method_handle_invoke(false)
   {
     init_class_id(Class_CallJava);
   }
@@ -554,6 +580,8 @@
   void  set_method(ciMethod *m)           { _method = m; }
   void  set_optimized_virtual(bool f)     { _optimized_virtual = f; }
   bool  is_optimized_virtual() const      { return _optimized_virtual; }
+  void  set_method_handle_invoke(bool f)  { _method_handle_invoke = f; }
+  bool  is_method_handle_invoke() const   { return _method_handle_invoke; }
 
 #ifndef PRODUCT
   virtual void  dump_spec(outputStream *st) const;
--- a/src/share/vm/opto/compile.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/compile.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -224,6 +224,32 @@
 }
 
 
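+// Replace all uses of n with nn, keeping the initial GVN hash table
+// consistent and recording each affected user for IGVN.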
+void Compile::gvn_replace_by(Node* n, Node* nn) {
+  for (DUIterator_Last imin, i = n->last_outs(imin); i >= imin; ) {
+    Node* use = n->last_out(i);
+    bool is_in_table = initial_gvn()->hash_delete(use);
+    uint uses_found = 0;
+    for (uint j = 0; j < use->len(); j++) {
+      if (use->in(j) == n) {
+        if (j < use->req())
+          use->set_req(j, nn);
+        else
+          use->set_prec(j, nn);
+        uses_found++;
+      }
+    }
+    if (is_in_table) {
+      // reinsert into table
+      initial_gvn()->hash_find_insert(use);
+    }
+    record_for_igvn(use);
+    i -= uses_found;    // we deleted 1 or more copies of this edge
+  }
+}
+
+
+
+
 // Identify all nodes that are reachable from below, useful.
 // Use breadth-first pass that records state in a Unique_Node_List,
 // recursive traversal is slower.
@@ -439,6 +465,7 @@
                   _code_buffer("Compile::Fill_buffer"),
                   _orig_pc_slot(0),
                   _orig_pc_slot_offset_in_bytes(0),
+                  _has_method_handle_invokes(false),
                   _node_bundling_limit(0),
                   _node_bundling_base(NULL),
                   _java_calls(0),
@@ -554,6 +581,28 @@
       rethrow_exceptions(kit.transfer_exceptions_into_jvms());
     }
 
+    if (!failing() && has_stringbuilder()) {
+      {
+        // remove useless nodes to make the usage analysis simpler
+        ResourceMark rm;
+        PhaseRemoveUseless pru(initial_gvn(), &for_igvn);
+      }
+
+      {
+        ResourceMark rm;
+        print_method("Before StringOpts", 3);
+        PhaseStringOpts pso(initial_gvn(), &for_igvn);
+        print_method("After StringOpts", 3);
+      }
+
+      // now inline anything that we skipped the first time around
+      while (_late_inlines.length() > 0) {
+        CallGenerator* cg = _late_inlines.pop();
+        cg->do_late_inline();
+      }
+    }
+    assert(_late_inlines.length() == 0, "should have been processed");
+
     print_method("Before RemoveUseless", 3);
 
     // Remove clutter produced by parsing.
@@ -711,6 +760,7 @@
     _do_escape_analysis(false),
     _failure_reason(NULL),
     _code_buffer("Compile::Fill_buffer"),
+    _has_method_handle_invokes(false),
     _node_bundling_limit(0),
     _node_bundling_base(NULL),
     _java_calls(0),
@@ -820,6 +870,7 @@
   _fixed_slots = 0;
   set_has_split_ifs(false);
   set_has_loops(has_method() && method()->has_loops()); // first approximation
+  set_has_stringbuilder(false);
   _deopt_happens = true;  // start out assuming the worst
   _trap_can_recompile = false;  // no traps emitted yet
   _major_progress = true; // start out assuming good things will happen
@@ -883,6 +934,7 @@
 
   _intrinsics = NULL;
   _macro_nodes = new GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
+  _predicate_opaqs = new GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
   register_library_intrinsics();
 }
 
@@ -1504,6 +1556,19 @@
   }
 }
 
+//---------------------cleanup_loop_predicates-----------------------
+// Remove the opaque nodes that protect the predicates so that all unused
+// checks and uncommon_traps will be eliminated from the ideal graph
+void Compile::cleanup_loop_predicates(PhaseIterGVN &igvn) {
+  if (predicate_count()==0) return;
+  for (int i = predicate_count(); i > 0; i--) {
+    Node * n = predicate_opaque1_node(i-1);
+    assert(n->Opcode() == Op_Opaque1, "must be");
+    igvn.replace_node(n, n->in(1));
+  }
+  assert(predicate_count()==0, "should be clean!");
+  igvn.optimize();
+}
 
 //------------------------------Optimize---------------------------------------
 // Given a graph, optimize it.
@@ -1545,7 +1610,7 @@
   if((loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) {
     {
       TracePhase t2("idealLoop", &_t_idealLoop, true);
-      PhaseIdealLoop ideal_loop( igvn, true );
+      PhaseIdealLoop ideal_loop( igvn, true, UseLoopPredicate);
       loop_opts_cnt--;
       if (major_progress()) print_method("PhaseIdealLoop 1", 2);
       if (failing())  return;
@@ -1553,7 +1618,7 @@
     // Loop opts pass if partial peeling occurred in previous pass
     if(PartialPeelLoop && major_progress() && (loop_opts_cnt > 0)) {
       TracePhase t3("idealLoop", &_t_idealLoop, true);
-      PhaseIdealLoop ideal_loop( igvn, false );
+      PhaseIdealLoop ideal_loop( igvn, false, UseLoopPredicate);
       loop_opts_cnt--;
       if (major_progress()) print_method("PhaseIdealLoop 2", 2);
       if (failing())  return;
@@ -1561,7 +1626,7 @@
     // Loop opts pass for loop-unrolling before CCP
     if(major_progress() && (loop_opts_cnt > 0)) {
       TracePhase t4("idealLoop", &_t_idealLoop, true);
-      PhaseIdealLoop ideal_loop( igvn, false );
+      PhaseIdealLoop ideal_loop( igvn, false, UseLoopPredicate);
       loop_opts_cnt--;
       if (major_progress()) print_method("PhaseIdealLoop 3", 2);
     }
@@ -1599,13 +1664,21 @@
   // peeling, unrolling, etc.
   if(loop_opts_cnt > 0) {
     debug_only( int cnt = 0; );
+    bool loop_predication = UseLoopPredicate;
     while(major_progress() && (loop_opts_cnt > 0)) {
       TracePhase t2("idealLoop", &_t_idealLoop, true);
       assert( cnt++ < 40, "infinite cycle in loop optimization" );
-      PhaseIdealLoop ideal_loop( igvn, true );
+      PhaseIdealLoop ideal_loop( igvn, true, loop_predication);
       loop_opts_cnt--;
       if (major_progress()) print_method("PhaseIdealLoop iterations", 2);
       if (failing())  return;
+      // Perform loop predication optimization during first iteration after CCP.
+      // After that switch it off and clean up unused loop predicates.
+      if (loop_predication) {
+        loop_predication = false;
+        cleanup_loop_predicates(igvn);
+        if (failing())  return;
+      }
     }
   }
 
@@ -1803,6 +1876,7 @@
          !n->is_Phi() &&       // a few noisily useless nodes
           !n->is_Proj() &&
           !n->is_MachTemp() &&
+          !n->is_SafePointScalarObject() &&
           !n->is_Catch() &&     // Would be nice to print exception table targets
           !n->is_MergeMem() &&  // Not very interesting
           !n->is_top() &&       // Debug info table constants
@@ -2240,6 +2314,30 @@
     break;
   }
 
+  case Op_Proj: {
+    if (OptimizeStringConcat) {
+      ProjNode* p = n->as_Proj();
+      if (p->_is_io_use) {
+        // Separate projections were used for the exception path and
+        // are normally removed by late inlining.  If the call was not
+        // inlined they hang around and should just be replaced with
+        // the original projection.
+        Node* proj = NULL;
+        // Replace with just one
+        for (SimpleDUIterator i(p->in(0)); i.has_next(); i.next()) {
+          Node *use = i.get();
+          if (use->is_Proj() && p != use && use->as_Proj()->_con == p->_con) {
+            proj = use;
+            break;
+          }
+        }
+        assert(p != NULL, "must be found");
+        p->subsume_by(proj);
+      }
+    }
+    break;
+  }
+
   case Op_Phi:
     if (n->as_Phi()->bottom_type()->isa_narrowoop()) {
       // The EncodeP optimization may create Phi with the same edges
--- a/src/share/vm/opto/compile.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/compile.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,6 +38,7 @@
 class OptoReg;
 class PhaseCFG;
 class PhaseGVN;
+class PhaseIterGVN;
 class PhaseRegAlloc;
 class PhaseCCP;
 class PhaseCCP_DCE;
@@ -149,6 +150,7 @@
   bool                  _has_loops;             // True if the method _may_ have some loops
   bool                  _has_split_ifs;         // True if the method _may_ have some split-if
   bool                  _has_unsafe_access;     // True if the method _may_ produce faults in unsafe loads or stores.
+  bool                  _has_stringbuilder;     // True if StringBuffers or StringBuilders are allocated
   uint                  _trap_hist[trapHistLength];  // Cumulative traps
   bool                  _trap_can_recompile;    // Have we emitted a recompiling trap?
   uint                  _decompile_count;       // Cumulative decompilation counts.
@@ -164,6 +166,9 @@
   bool                  _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
 #endif
 
+  // JSR 292
+  bool                  _has_method_handle_invokes; // True if this method has MethodHandle invokes.
+
   // Compilation environment.
   Arena                 _comp_arena;            // Arena with lifetime equivalent to Compile
   ciEnv*                _env;                   // CI interface
@@ -171,6 +176,7 @@
   const char*           _failure_reason;        // for record_failure/failing pattern
   GrowableArray<CallGenerator*>* _intrinsics;   // List of intrinsics.
   GrowableArray<Node*>* _macro_nodes;           // List of nodes which need to be expanded before matching.
+  GrowableArray<Node*>* _predicate_opaqs;       // List of Opaque1 nodes for the loop predicates.
   ConnectionGraph*      _congraph;
 #ifndef PRODUCT
   IdealGraphPrinter*    _printer;
@@ -219,6 +225,9 @@
   Unique_Node_List*     _for_igvn;              // Initial work-list for next round of Iterative GVN
   WarmCallInfo*         _warm_calls;            // Sorted work-list for heat-based inlining.
 
+  GrowableArray<CallGenerator*> _late_inlines;  // List of CallGenerators to be revisited after
+                                                // main parsing has finished.
+
   // Matching, CFG layout, allocation, code generation
   PhaseCFG*             _cfg;                   // Results of CFG finding
   bool                  _select_24_bit_instr;   // We selected an instruction with a 24-bit result
@@ -298,6 +307,8 @@
   void          set_has_split_ifs(bool z)       { _has_split_ifs = z; }
   bool              has_unsafe_access() const   { return _has_unsafe_access; }
   void          set_has_unsafe_access(bool z)   { _has_unsafe_access = z; }
+  bool              has_stringbuilder() const   { return _has_stringbuilder; }
+  void          set_has_stringbuilder(bool z)   { _has_stringbuilder = z; }
   void          set_trap_count(uint r, uint c)  { assert(r < trapHistLength, "oob");        _trap_hist[r] = c; }
   uint              trap_count(uint r) const    { assert(r < trapHistLength, "oob"); return _trap_hist[r]; }
   bool              trap_can_recompile() const  { return _trap_can_recompile; }
@@ -328,6 +339,10 @@
   void          set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
 #endif
 
+  // JSR 292
+  bool              has_method_handle_invokes() const { return _has_method_handle_invokes;     }
+  void          set_has_method_handle_invokes(bool z) {        _has_method_handle_invokes = z; }
+
   void begin_method() {
 #ifndef PRODUCT
     if (_printer) _printer->begin_method(this);
@@ -345,7 +360,9 @@
   }
 
   int           macro_count()                   { return _macro_nodes->length(); }
+  int           predicate_count()               { return _predicate_opaqs->length();}
   Node*         macro_node(int idx)             { return _macro_nodes->at(idx); }
+  Node*         predicate_opaque1_node(int idx) { return _predicate_opaqs->at(idx);}
   ConnectionGraph* congraph()                   { return _congraph;}
   void add_macro_node(Node * n) {
     //assert(n->is_macro(), "must be a macro node");
@@ -357,7 +374,19 @@
     // that the node is in the array before attempting to remove it
     if (_macro_nodes->contains(n))
       _macro_nodes->remove(n);
+    // remove from _predicate_opaqs list also if it is there
+    if (predicate_count() > 0 && _predicate_opaqs->contains(n)){
+      _predicate_opaqs->remove(n);
+    }
   }
+  void add_predicate_opaq(Node * n) {
+    assert(!_predicate_opaqs->contains(n), " duplicate entry in predicate opaque1");
+    assert(_macro_nodes->contains(n), "should have already been in macro list");
+    _predicate_opaqs->append(n);
+  }
+  // remove the opaque nodes that protect the predicates so that the unused checks and
+  // uncommon traps will be eliminated from the graph.
+  void cleanup_loop_predicates(PhaseIterGVN &igvn);
 
   // Compilation environment.
   Arena*            comp_arena()                { return &_comp_arena; }
@@ -475,6 +504,7 @@
   // Decide how to build a call.
   // The profile factor is a discount to apply to this site's interp. profile.
   CallGenerator*    call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual, JVMState* jvms, bool allow_inline, float profile_factor);
+  bool should_delay_inlining(ciMethod* call_method, JVMState* jvms);
 
   // Report if there were too many traps at a current method and bci.
   // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
@@ -495,6 +525,11 @@
   void          set_initial_gvn(PhaseGVN *gvn)           { _initial_gvn = gvn; }
   void          set_for_igvn(Unique_Node_List *for_igvn) { _for_igvn = for_igvn; }
 
+  // Replace n by nn using initial_gvn, calling hash_delete and
+  // record_for_igvn as needed.
+  void gvn_replace_by(Node* n, Node* nn);
+
+
   void              identify_useful_nodes(Unique_Node_List &useful);
   void              remove_useless_nodes  (Unique_Node_List &useful);
 
@@ -502,6 +537,9 @@
   void          set_warm_calls(WarmCallInfo* l) { _warm_calls = l; }
   WarmCallInfo* pop_warm_call();
 
+  // Record this CallGenerator for inlining at the end of parsing.
+  void              add_late_inline(CallGenerator* cg) { _late_inlines.push(cg); }
+
   // Matching, CFG layout, allocation, code generation
   PhaseCFG*         cfg()                       { return _cfg; }
   bool              select_24_bit_instr() const { return _select_24_bit_instr; }
--- a/src/share/vm/opto/divnode.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/divnode.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -114,7 +114,8 @@
       if( andconi_t && andconi_t->is_con() ) {
         jint andconi = andconi_t->get_con();
         if( andconi < 0 && is_power_of_2(-andconi) && (-andconi) >= d ) {
-          dividend = dividend->in(1);
+          if( (-andconi) == d ) // Remove AND if it clears bits which will be shifted
+            dividend = dividend->in(1);
           needs_rounding = false;
         }
       }
@@ -356,7 +357,8 @@
       if( andconl_t && andconl_t->is_con() ) {
         jlong andconl = andconl_t->get_con();
         if( andconl < 0 && is_power_of_2_long(-andconl) && (-andconl) >= d ) {
-          dividend = dividend->in(1);
+          if( (-andconl) == d ) // Remove AND if it clears bits which will be shifted
+            dividend = dividend->in(1);
           needs_rounding = false;
         }
       }
--- a/src/share/vm/opto/doCall.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/doCall.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,7 +43,9 @@
 }
 #endif
 
-CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual, JVMState* jvms, bool allow_inline, float prof_factor) {
+CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual,
+                                       JVMState* jvms, bool allow_inline,
+                                       float prof_factor) {
   CallGenerator* cg;
 
   // Dtrace currently doesn't work unless all calls are vanilla
@@ -68,7 +70,7 @@
   CompileLog* log = this->log();
   if (log != NULL) {
     int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1;
-    int r2id = (profile.morphism() == 2)? log->identify(profile.receiver(1)):-1;
+    int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)):-1;
     log->begin_elem("call method='%d' count='%d' prof_factor='%g'",
                     log->identify(call_method), site_count, prof_factor);
     if (call_is_virtual)  log->print(" virtual='1'");
@@ -116,7 +118,7 @@
         // TO DO:  When UseOldInlining is removed, copy the ILT code elsewhere.
         float site_invoke_ratio = prof_factor;
         // Note:  ilt is for the root of this parse, not the present call site.
-        ilt = new InlineTree(this, jvms->method(), jvms->caller(), site_invoke_ratio);
+        ilt = new InlineTree(this, jvms->method(), jvms->caller(), site_invoke_ratio, 0);
       }
       WarmCallInfo scratch_ci;
       if (!UseOldInlining)
@@ -128,6 +130,12 @@
 
       if (allow_inline) {
         CallGenerator* cg = CallGenerator::for_inline(call_method, expected_uses);
+        if (require_inline && cg != NULL && should_delay_inlining(call_method, jvms)) {
+          // Delay the inlining of this method to give us the
+          // opportunity to perform some high-level optimizations
+          // first.
+          return CallGenerator::for_late_inline(call_method, cg);
+        }
         if (cg == NULL) {
           // Fall through.
         } else if (require_inline || !InlineWarmCalls) {
@@ -174,26 +182,16 @@
             }
           }
           CallGenerator* miss_cg;
+          Deoptimization::DeoptReason reason = (profile.morphism() == 2) ?
+                                    Deoptimization::Reason_bimorphic :
+                                    Deoptimization::Reason_class_check;
           if (( profile.morphism() == 1 ||
                (profile.morphism() == 2 && next_hit_cg != NULL) ) &&
-
-              !too_many_traps(Deoptimization::Reason_class_check)
-
-              // Check only total number of traps per method to allow
-              // the transition from monomorphic to bimorphic case between
-              // compilations without falling into virtual call.
-              // A monomorphic case may have the class_check trap flag is set
-              // due to the time gap between the uncommon trap processing
-              // when flags are set in MDO and the call site bytecode execution
-              // in Interpreter when MDO counters are updated.
-              // There was also class_check trap in monomorphic case due to
-              // the bug 6225440.
-
+              !too_many_traps(jvms->method(), jvms->bci(), reason)
              ) {
             // Generate uncommon trap for class check failure path
             // in case of monomorphic or bimorphic virtual call site.
-            miss_cg = CallGenerator::for_uncommon_trap(call_method,
-                        Deoptimization::Reason_class_check,
+            miss_cg = CallGenerator::for_uncommon_trap(call_method, reason,
                         Deoptimization::Action_maybe_recompile);
           } else {
             // Generate virtual call for class check failure path
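
For context, a hedged sketch of the call-site shape the new Reason_bimorphic reason targets: a site that has seen exactly two receiver types can now trap and recompile on a third type instead of immediately degrading to a virtual call (the class and method names below are invented for illustration):

    public class BimorphicDemo {
        interface Shape { int area(); }
        static final class Sq implements Shape { public int area() { return 4; } }
        static final class Tri implements Shape { public int area() { return 3; } }

        static int sum(Shape[] shapes) {
            int s = 0;
            // Profiling sees only Sq and Tri here (morphism == 2), so C2 can
            // inline both receivers and guard the site with a bimorphic trap.
            for (Shape sh : shapes) s += sh.area();
            return s;
        }

        public static void main(String[] args) {
            System.out.println(sum(new Shape[] { new Sq(), new Tri(), new Sq() }));
        }
    }
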
@@ -218,6 +216,57 @@
     }
   }
 
+  // Do MethodHandle calls.
+  if (call_method->is_method_handle_invoke()) {
+    if (jvms->method()->java_code_at_bci(jvms->bci()) != Bytecodes::_invokedynamic) {
+      GraphKit kit(jvms);
+      Node* n = kit.argument(0);
+
+      if (n->Opcode() == Op_ConP) {
+        const TypeOopPtr* oop_ptr = n->bottom_type()->is_oopptr();
+        ciObject* const_oop = oop_ptr->const_oop();
+        ciMethodHandle* method_handle = const_oop->as_method_handle();
+
+        // Set the actually called method to have access to the class
+        // and signature in the MethodHandleCompiler.
+        method_handle->set_callee(call_method);
+
+        // Get an adapter for the MethodHandle.
+        ciMethod* target_method = method_handle->get_method_handle_adapter();
+
+        CallGenerator* hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor);
+        if (hit_cg != NULL && hit_cg->is_inline())
+          return hit_cg;
+      }
+
+      return CallGenerator::for_direct_call(call_method);
+    }
+    else {
+      // Get the MethodHandle from the CallSite.
+      ciMethod* caller_method = jvms->method();
+      ciBytecodeStream str(caller_method);
+      str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
+      ciCallSite*     call_site     = str.get_call_site();
+      ciMethodHandle* method_handle = call_site->get_target();
+
+      // Set the actually called method to have access to the class
+      // and signature in the MethodHandleCompiler.
+      method_handle->set_callee(call_method);
+
+      // Get an adapter for the MethodHandle.
+      ciMethod* target_method = method_handle->get_invokedynamic_adapter();
+
+      CallGenerator* hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor);
+      if (hit_cg != NULL && hit_cg->is_inline()) {
+        CallGenerator* miss_cg = CallGenerator::for_dynamic_call(call_method);
+        return CallGenerator::for_predicted_dynamic_call(method_handle, miss_cg, hit_cg, prof_factor);
+      }
+
+      // If something failed, generate a normal dynamic call.
+      return CallGenerator::for_dynamic_call(call_method);
+    }
+  }
+
   // There was no special inlining tactic, or it bailed out.
   // Use a more generic tactic, like a simple call.
   if (call_is_virtual) {
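
A hedged illustration of the constant-MethodHandle case handled above, written against the final java.lang.invoke API rather than the java.dyn package this changeset predates (treat that mapping as an assumption):

    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.MethodType;

    public class MhDemo {
        // A MethodHandle held in a static final is a compile-time constant
        // (an Op_ConP node), so the block above can ask for an adapter and
        // try to inline the actual target instead of a generic invoke.
        static final MethodHandle MAX;
        static {
            try {
                MAX = MethodHandles.lookup().findStatic(
                    Math.class, "max",
                    MethodType.methodType(int.class, int.class, int.class));
            } catch (ReflectiveOperationException e) {
                throw new ExceptionInInitializerError(e);
            }
        }

        public static void main(String[] args) throws Throwable {
            System.out.println((int) MAX.invokeExact(3, 4)); // 4
        }
    }
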
@@ -225,10 +274,63 @@
   } else {
     // Class Hierarchy Analysis or Type Profile reveals a unique target,
     // or it is a static or special call.
-    return CallGenerator::for_direct_call(call_method);
+    return CallGenerator::for_direct_call(call_method, should_delay_inlining(call_method, jvms));
   }
 }
 
+// Return true for methods that shouldn't be inlined early so that
+// they are easier to analyze and optimize as intrinsics.
+bool Compile::should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
+  if (has_stringbuilder()) {
+
+    if ((call_method->holder() == C->env()->StringBuilder_klass() ||
+         call_method->holder() == C->env()->StringBuffer_klass()) &&
+        (jvms->method()->holder() == C->env()->StringBuilder_klass() ||
+         jvms->method()->holder() == C->env()->StringBuffer_klass())) {
+      // Delay SB calls only when called from non-SB code
+      return false;
+    }
+
+    switch (call_method->intrinsic_id()) {
+      case vmIntrinsics::_StringBuilder_void:
+      case vmIntrinsics::_StringBuilder_int:
+      case vmIntrinsics::_StringBuilder_String:
+      case vmIntrinsics::_StringBuilder_append_char:
+      case vmIntrinsics::_StringBuilder_append_int:
+      case vmIntrinsics::_StringBuilder_append_String:
+      case vmIntrinsics::_StringBuilder_toString:
+      case vmIntrinsics::_StringBuffer_void:
+      case vmIntrinsics::_StringBuffer_int:
+      case vmIntrinsics::_StringBuffer_String:
+      case vmIntrinsics::_StringBuffer_append_char:
+      case vmIntrinsics::_StringBuffer_append_int:
+      case vmIntrinsics::_StringBuffer_append_String:
+      case vmIntrinsics::_StringBuffer_toString:
+      case vmIntrinsics::_Integer_toString:
+        return true;
+
+      case vmIntrinsics::_String_String:
+        {
+          Node* receiver = jvms->map()->in(jvms->argoff() + 1);
+          if (receiver->is_Proj() && receiver->in(0)->is_CallStaticJava()) {
+            CallStaticJavaNode* csj = receiver->in(0)->as_CallStaticJava();
+            ciMethod* m = csj->method();
+            if (m != NULL &&
+                (m->intrinsic_id() == vmIntrinsics::_StringBuffer_toString ||
+                 m->intrinsic_id() == vmIntrinsics::_StringBuilder_toString))
+              // Delay String.<init>(new SB())
+              return true;
+          }
+          return false;
+        }
+
+      default:
+        return false;
+    }
+  }
+  return false;
+}
+
 
 // uncommon-trap call-sites where callee is unloaded, uninitialized or will not link
 bool Parse::can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass* klass) {
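
The intrinsic IDs listed in should_delay_inlining() correspond to the StringBuilder/StringBuffer sequence javac emits for string concatenation; delaying their inlining keeps the whole chain visible to the string-concat optimization. A rough Java example of that shape (illustrative, assuming the classic javac translation):

    public class ConcatDemo {
        static String greet(String name, int n) {
            // javac lowers this to roughly:
            //   new StringBuilder().append("hello ").append(name)
            //                      .append(" #").append(n).toString()
            // which is exactly the ctor/append/toString set delayed above.
            return "hello " + name + " #" + n;
        }

        public static void main(String[] args) {
            System.out.println(greet("duke", 7));
        }
    }
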
@@ -240,7 +342,7 @@
   // Interface classes can be loaded & linked and never get around to
   // being initialized.  Uncommon-trap for not-initialized static or
   // v-calls.  Let interface calls happen.
-  ciInstanceKlass* holder_klass  = dest_method->holder();
+  ciInstanceKlass* holder_klass = dest_method->holder();
   if (!holder_klass->is_initialized() &&
       !holder_klass->is_interface()) {
     uncommon_trap(Deoptimization::Reason_uninitialized,
@@ -248,14 +350,6 @@
                   holder_klass);
     return true;
   }
-  if (dest_method->is_method_handle_invoke()
-      && holder_klass->name() == ciSymbol::java_dyn_Dynamic()) {
-    // FIXME: NYI
-    uncommon_trap(Deoptimization::Reason_unhandled,
-                  Deoptimization::Action_none,
-                  holder_klass);
-    return true;
-  }
 
   assert(dest_method->will_link(method()->holder(), klass, bc()), "dest_method: typeflow responsibility");
   return false;
@@ -274,6 +368,7 @@
   bool is_virtual = bc() == Bytecodes::_invokevirtual;
   bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
   bool has_receiver = is_virtual_or_interface || bc() == Bytecodes::_invokespecial;
+  bool is_invokedynamic = bc() == Bytecodes::_invokedynamic;
 
   // Find target being called
   bool             will_link;
@@ -282,7 +377,8 @@
   ciKlass* holder = iter().get_declared_method_holder();
   ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);
 
-  int   nargs    = dest_method->arg_size();
+  int nargs = dest_method->arg_size();
+  if (is_invokedynamic)  nargs -= 1;
 
   // uncommon-trap when callee is unloaded, uninitialized or will not link
   // bailout when too many arguments for register representation
@@ -296,7 +392,7 @@
     return;
   }
   assert(holder_klass->is_loaded(), "");
-  assert(dest_method->is_static() == !has_receiver, "must match bc");
+  assert((dest_method->is_static() || is_invokedynamic) == !has_receiver, "must match bc");
   // Note: this takes into account invokeinterface of methods declared in java/lang/Object,
   // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
   assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
--- a/src/share/vm/opto/escape.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/escape.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -537,11 +537,13 @@
   }
 
   const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
-  // Do NOT remove the next call: ensure an new alias index is allocated
-  // for the instance type
+  // Do NOT remove the next line: ensure a new alias index is allocated
+  // for the instance type. Note: the C++ compiler will not remove the
+  // call since it has side effects.
   int alias_idx = _compile->get_alias_index(tinst);
   igvn->set_type(addp, tinst);
   // record the allocation in the node map
+  assert(ptnode_adr(addp->_idx)->_node != NULL, "should be registered");
   set_map(addp->_idx, get_map(base->_idx));
 
   // Set addp's Base and Address to 'base'.
@@ -617,9 +619,14 @@
   const TypePtr *atype = C->get_adr_type(alias_idx);
   result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
   C->copy_node_notes_to(result, orig_phi);
-  set_map_phi(orig_phi->_idx, result);
   igvn->set_type(result, result->bottom_type());
   record_for_optimizer(result);
+
+  debug_only(Node* pn = ptnode_adr(orig_phi->_idx)->_node;)
+  assert(pn == NULL || pn == orig_phi, "wrong node");
+  set_map(orig_phi->_idx, result);
+  ptnode_adr(orig_phi->_idx)->_node = orig_phi;
+
   new_created = true;
   return result;
 }
@@ -710,6 +717,81 @@
 }
 
 //
+// Move memory users to their memory slices.
+//
+void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *>  &orig_phis, PhaseGVN *igvn) {
+  Compile* C = _compile;
+
+  const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();
+  assert(tp != NULL, "ptr type");
+  int alias_idx = C->get_alias_index(tp);
+  int general_idx = C->get_general_index(alias_idx);
+
+  // Move users first
+  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+    Node* use = n->fast_out(i);
+    if (use->is_MergeMem()) {
+      MergeMemNode* mmem = use->as_MergeMem();
+      assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice");
+      if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) {
+        continue; // Nothing to do
+      }
+      // Replace previous general reference to mem node.
+      uint orig_uniq = C->unique();
+      Node* m = find_inst_mem(n, general_idx, orig_phis, igvn);
+      assert(orig_uniq == C->unique(), "no new nodes");
+      mmem->set_memory_at(general_idx, m);
+      --imax;
+      --i;
+    } else if (use->is_MemBar()) {
+      assert(!use->is_Initialize(), "initializing stores should not be moved");
+      if (use->req() > MemBarNode::Precedent &&
+          use->in(MemBarNode::Precedent) == n) {
+        // Don't move related membars.
+        record_for_optimizer(use);
+        continue;
+      }
+      tp = use->as_MemBar()->adr_type()->isa_ptr();
+      if ((tp != NULL && C->get_alias_index(tp) == alias_idx) ||
+          alias_idx == general_idx) {
+        continue; // Nothing to do
+      }
+      // Move to general memory slice.
+      uint orig_uniq = C->unique();
+      Node* m = find_inst_mem(n, general_idx, orig_phis, igvn);
+      assert(orig_uniq == C->unique(), "no new nodes");
+      igvn->hash_delete(use);
+      imax -= use->replace_edge(n, m);
+      igvn->hash_insert(use);
+      record_for_optimizer(use);
+      --i;
+#ifdef ASSERT
+    } else if (use->is_Mem()) {
+      if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) {
+        // Don't move related cardmark.
+        continue;
+      }
+      // Memory nodes should have new memory input.
+      tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
+      assert(tp != NULL, "ptr type");
+      int idx = C->get_alias_index(tp);
+      assert(get_map(use->_idx) != NULL || idx == alias_idx,
+             "Following memory nodes should have new memory input or be on the same memory slice");
+    } else if (use->is_Phi()) {
+      // Phi nodes should be split and moved already.
+      tp = use->as_Phi()->adr_type()->isa_ptr();
+      assert(tp != NULL, "ptr type");
+      int idx = C->get_alias_index(tp);
+      assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
+    } else {
+      use->dump();
+      assert(false, "should not be here");
+#endif
+    }
+  }
+}
+
+//
 // Search memory chain of "mem" to find a MemNode whose address
 // is the specified alias index.
 //
@@ -774,10 +856,18 @@
                C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
       Node *un = result->as_Phi()->unique_input(phase);
       if (un != NULL) {
+        orig_phis.append_if_missing(result->as_Phi());
         result = un;
       } else {
         break;
       }
+    } else if (result->is_ClearArray()) {
+      if (!ClearArrayNode::step_through(&result, (uint)tinst->instance_id(), phase)) {
+        // Can not bypass initialization of the instance
+        // we are looking for.
+        break;
+      }
+      // Otherwise skip it (the call updated the 'result' value).
     } else if (result->Opcode() == Op_SCMemProj) {
       assert(result->in(0)->is_LoadStore(), "sanity");
       const Type *at = phase->type(result->in(0)->in(MemNode::Address));
@@ -807,7 +897,6 @@
   return result;
 }
 
-
 //
 //  Convert the types of unescaped object to instance types where possible,
 //  propagate the new type information through the graph, and update memory
@@ -899,12 +988,13 @@
 //
 void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist) {
   GrowableArray<Node *>  memnode_worklist;
-  GrowableArray<Node *>  mergemem_worklist;
   GrowableArray<PhiNode *>  orig_phis;
+
   PhaseGVN  *igvn = _compile->initial_gvn();
   uint new_index_start = (uint) _compile->num_alias_types();
-  VectorSet visited(Thread::current()->resource_area());
-  VectorSet ptset(Thread::current()->resource_area());
+  Arena* arena = Thread::current()->resource_area();
+  VectorSet visited(arena);
+  VectorSet ptset(arena);
 
 
   //  Phase 1:  Process possible allocations from alloc_worklist.
@@ -980,6 +1070,8 @@
       //   - non-escaping
       //   - eligible to be a unique type
       //   - not determined to be ineligible by escape analysis
+      assert(ptnode_adr(alloc->_idx)->_node != NULL &&
+             ptnode_adr(n->_idx)->_node != NULL, "should be registered");
       set_map(alloc->_idx, n);
       set_map(n->_idx, alloc);
       const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
@@ -1024,7 +1116,7 @@
               alloc_worklist.append_if_missing(addp2);
             }
             alloc_worklist.append_if_missing(use);
-          } else if (use->is_Initialize()) {
+          } else if (use->is_MemBar()) {
             memnode_worklist.append_if_missing(use);
           }
         }
@@ -1034,10 +1126,12 @@
       PointsTo(ptset, get_addp_base(n), igvn);
       assert(ptset.Size() == 1, "AddP address is unique");
       uint elem = ptset.getelem(); // Allocation node's index
-      if (elem == _phantom_object)
+      if (elem == _phantom_object) {
+        assert(false, "escaped allocation");
         continue; // Assume the value was set outside this method.
+      }
       Node *base = get_map(elem);  // CheckCastPP node
-      if (!split_AddP(n, base, igvn)) continue; // wrong type
+      if (!split_AddP(n, base, igvn)) continue; // wrong type from dead path
       tinst = igvn->type(base)->isa_oopptr();
     } else if (n->is_Phi() ||
                n->is_CheckCastPP() ||
@@ -1052,8 +1146,10 @@
       PointsTo(ptset, n, igvn);
       if (ptset.Size() == 1) {
         uint elem = ptset.getelem(); // Allocation node's index
-        if (elem == _phantom_object)
+        if (elem == _phantom_object) {
+          assert(false, "escaped allocation");
           continue; // Assume the value was set outside this method.
+        }
         Node *val = get_map(elem);   // CheckCastPP node
         TypeNode *tn = n->as_Type();
         tinst = igvn->type(val)->isa_oopptr();
@@ -1068,8 +1164,7 @@
           tn_t = tn_type->isa_oopptr();
         }
 
-        if (tn_t != NULL &&
-            tinst->cast_to_instance_id(TypeOopPtr::InstanceBot)->higher_equal(tn_t)) {
+        if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {
           if (tn_type->isa_narrowoop()) {
             tn_type = tinst->make_narrowoop();
           } else {
@@ -1081,33 +1176,25 @@
           igvn->hash_insert(tn);
           record_for_optimizer(n);
         } else {
-          continue; // wrong type
+          assert(tn_type == TypePtr::NULL_PTR ||
+                 (tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass())),
+                 "unexpected type");
+          continue; // Skip dead path with different type
         }
       }
     } else {
+      debug_only(n->dump();)
+      assert(false, "EA: unexpected node");
       continue;
     }
-    // push users on appropriate worklist
+    // push allocation's users on appropriate worklist
     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
       Node *use = n->fast_out(i);
       if(use->is_Mem() && use->in(MemNode::Address) == n) {
-        memnode_worklist.append_if_missing(use);
-      } else if (use->is_Initialize()) {
+        // Load/store to instance's field
         memnode_worklist.append_if_missing(use);
-      } else if (use->is_MergeMem()) {
-        mergemem_worklist.append_if_missing(use);
-      } else if (use->is_SafePoint() && tinst != NULL) {
-        // Look for MergeMem nodes for calls which reference unique allocation
-        // (through CheckCastPP nodes) even for debug info.
-        Node* m = use->in(TypeFunc::Memory);
-        uint iid = tinst->instance_id();
-        while (m->is_Proj() && m->in(0)->is_SafePoint() &&
-               m->in(0) != use && !m->in(0)->_idx != iid) {
-          m = m->in(0)->in(TypeFunc::Memory);
-        }
-        if (m->is_MergeMem()) {
-          mergemem_worklist.append_if_missing(m);
-        }
+      } else if (use->is_MemBar()) {
+        memnode_worklist.append_if_missing(use);
       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
         Node* addp2 = find_second_addp(use, n);
         if (addp2 != NULL) {
@@ -1120,6 +1207,29 @@
                  use->is_DecodeN() ||
                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
         alloc_worklist.append_if_missing(use);
+#ifdef ASSERT
+      } else if (use->is_Mem()) {
+        assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
+      } else if (use->is_MergeMem()) {
+        assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
+      } else if (use->is_SafePoint()) {
+        // Look for MergeMem nodes for calls which reference unique allocation
+        // (through CheckCastPP nodes) even for debug info.
+        Node* m = use->in(TypeFunc::Memory);
+        if (m->is_MergeMem()) {
+          assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
+        }
+      } else {
+        uint op = use->Opcode();
+        if (!(op == Op_CmpP || op == Op_Conv2B ||
+              op == Op_CastP2X || op == Op_StoreCM ||
+              op == Op_FastLock || op == Op_AryEq || op == Op_StrComp ||
+              op == Op_StrEquals || op == Op_StrIndexOf)) {
+          n->dump();
+          use->dump();
+          assert(false, "EA: missing allocation reference path");
+        }
+#endif
       }
     }
 
@@ -1137,19 +1247,16 @@
     Node *n = memnode_worklist.pop();
     if (visited.test_set(n->_idx))
       continue;
-    if (n->is_Phi()) {
-      assert(n->as_Phi()->adr_type() != TypePtr::BOTTOM, "narrow memory slice required");
-      // we don't need to do anything, but the users must be pushed if we haven't processed
-      // this Phi before
-    } else if (n->is_Initialize()) {
-      // we don't need to do anything, but the users of the memory projection must be pushed
-      n = n->as_Initialize()->proj_out(TypeFunc::Memory);
+    if (n->is_Phi() || n->is_ClearArray()) {
+      // we don't need to do anything, but the users must be pushed
+    } else if (n->is_MemBar()) { // Initialize, MemBar nodes
+      // we don't need to do anything, but the users must be pushed
+      n = n->as_MemBar()->proj_out(TypeFunc::Memory);
       if (n == NULL)
         continue;
     } else {
       assert(n->is_Mem(), "memory node required.");
       Node *addr = n->in(MemNode::Address);
-      assert(addr->is_AddP(), "AddP required");
       const Type *addr_t = igvn->type(addr);
       if (addr_t == Type::TOP)
         continue;
@@ -1161,6 +1268,10 @@
         return;
       }
       if (mem != n->in(MemNode::Memory)) {
+        // We delay the memory edge update since we need the old one in
+        // the MergeMem code below when instance memory slices are separated.
+        debug_only(Node* pn = ptnode_adr(n->_idx)->_node;)
+        assert(pn == NULL || pn == n, "wrong node");
         set_map(n->_idx, mem);
         ptnode_adr(n->_idx)->_node = n;
       }
@@ -1181,36 +1292,55 @@
     // push user on appropriate worklist
     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
       Node *use = n->fast_out(i);
-      if (use->is_Phi()) {
+      if (use->is_Phi() || use->is_ClearArray()) {
         memnode_worklist.append_if_missing(use);
       } else if(use->is_Mem() && use->in(MemNode::Memory) == n) {
+        if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores
+          continue;
         memnode_worklist.append_if_missing(use);
-      } else if (use->is_Initialize()) {
+      } else if (use->is_MemBar()) {
         memnode_worklist.append_if_missing(use);
+#ifdef ASSERT
+      } else if(use->is_Mem()) {
+        assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
       } else if (use->is_MergeMem()) {
-        mergemem_worklist.append_if_missing(use);
+        assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
+      } else {
+        uint op = use->Opcode();
+        if (!(op == Op_StoreCM ||
+              (op == Op_CallLeaf && use->as_CallLeaf()->_name != NULL &&
+               strcmp(use->as_CallLeaf()->_name, "g1_wb_pre") == 0) ||
+              op == Op_AryEq || op == Op_StrComp ||
+              op == Op_StrEquals || op == Op_StrIndexOf)) {
+          n->dump();
+          use->dump();
+          assert(false, "EA: missing memory path");
+        }
+#endif
       }
     }
   }
 
   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
-  //            Walk each memory moving the first node encountered of each
+  //            Walk each memory slice moving the first node encountered of each
   //            instance type to the input corresponding to its alias index.
-  while (mergemem_worklist.length() != 0) {
-    Node *n = mergemem_worklist.pop();
-    assert(n->is_MergeMem(), "MergeMem node required.");
-    if (visited.test_set(n->_idx))
-      continue;
-    MergeMemNode *nmm = n->as_MergeMem();
+  uint length = _mergemem_worklist.length();
+  for( uint next = 0; next < length; ++next ) {
+    MergeMemNode* nmm = _mergemem_worklist.at(next);
+    assert(!visited.test_set(nmm->_idx), "should not be visited before");
     // Note: we don't want to use MergeMemStream here because we only want to
-    //  scan inputs which exist at the start, not ones we add during processing.
+    // scan inputs which exist at the start, not ones we add during processing.
+    // Note 2: MergeMem may already contain instance memory slices added
+    // during find_inst_mem() call when memory nodes were processed above.
+    igvn->hash_delete(nmm);
     uint nslices = nmm->req();
-    igvn->hash_delete(nmm);
     for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
       Node* mem = nmm->in(i);
       Node* cur = NULL;
       if (mem == NULL || mem->is_top())
         continue;
+      // First, update mergemem by moving memory nodes to corresponding slices
+      // if their type became more precise since this mergemem was created.
       while (mem->is_Mem()) {
         const Type *at = igvn->type(mem->in(MemNode::Address));
         if (at != Type::TOP) {
@@ -1229,7 +1359,7 @@
       }
       nmm->set_memory_at(i, (cur != NULL) ? cur : mem);
       // Find any instance of the current type if we haven't encountered
-      // a value of the instance along the chain.
+      // already a memory slice of the instance along the memory chain.
       for (uint ni = new_index_start; ni < new_index_end; ni++) {
         if((uint)_compile->get_general_index(ni) == i) {
           Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
@@ -1245,11 +1375,11 @@
     }
     // Find the rest of instances values
     for (uint ni = new_index_start; ni < new_index_end; ni++) {
-      const TypeOopPtr *tinst = igvn->C->get_adr_type(ni)->isa_oopptr();
+      const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
       Node* result = step_through_mergemem(nmm, ni, tinst);
       if (result == nmm->base_memory()) {
         // Didn't find instance memory, search through general slice recursively.
-        result = nmm->memory_at(igvn->C->get_general_index(ni));
+        result = nmm->memory_at(_compile->get_general_index(ni));
         result = find_inst_mem(result, ni, orig_phis, igvn);
         if (_compile->failing()) {
           return;
@@ -1259,41 +1389,6 @@
     }
     igvn->hash_insert(nmm);
     record_for_optimizer(nmm);
-
-    // Propagate new memory slices to following MergeMem nodes.
-    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
-      Node *use = n->fast_out(i);
-      if (use->is_Call()) {
-        CallNode* in = use->as_Call();
-        if (in->proj_out(TypeFunc::Memory) != NULL) {
-          Node* m = in->proj_out(TypeFunc::Memory);
-          for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
-            Node* mm = m->fast_out(j);
-            if (mm->is_MergeMem()) {
-              mergemem_worklist.append_if_missing(mm);
-            }
-          }
-        }
-        if (use->is_Allocate()) {
-          use = use->as_Allocate()->initialization();
-          if (use == NULL) {
-            continue;
-          }
-        }
-      }
-      if (use->is_Initialize()) {
-        InitializeNode* in = use->as_Initialize();
-        if (in->proj_out(TypeFunc::Memory) != NULL) {
-          Node* m = in->proj_out(TypeFunc::Memory);
-          for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
-            Node* mm = m->fast_out(j);
-            if (mm->is_MergeMem()) {
-              mergemem_worklist.append_if_missing(mm);
-            }
-          }
-        }
-      }
-    }
   }
 
   //  Phase 4:  Update the inputs of non-instance memory Phis and
@@ -1322,19 +1417,48 @@
   }
 
   // Update the memory inputs of MemNodes with the value we computed
-  // in Phase 2.
+  // in Phase 2 and move the memory users of stores to the corresponding memory slices.
+#ifdef ASSERT
+  visited.Clear();
+  Node_Stack old_mems(arena, _compile->unique() >> 2);
+#endif
   for (uint i = 0; i < nodes_size(); i++) {
     Node *nmem = get_map(i);
     if (nmem != NULL) {
       Node *n = ptnode_adr(i)->_node;
-      if (n != NULL && n->is_Mem()) {
+      assert(n != NULL, "sanity");
+      if (n->is_Mem()) {
+#ifdef ASSERT
+        Node* old_mem = n->in(MemNode::Memory);
+        if (!visited.test_set(old_mem->_idx)) {
+          old_mems.push(old_mem, old_mem->outcnt());
+        }
+#endif
+        assert(n->in(MemNode::Memory) != nmem, "sanity");
+        if (!n->is_Load()) {
+          // Move memory users of a store first.
+          move_inst_mem(n, orig_phis, igvn);
+        }
+        // Now update memory input
         igvn->hash_delete(n);
         n->set_req(MemNode::Memory, nmem);
         igvn->hash_insert(n);
         record_for_optimizer(n);
+      } else {
+        assert(n->is_Allocate() || n->is_CheckCastPP() ||
+               n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
       }
     }
   }
+#ifdef ASSERT
+  // Verify that memory was split correctly
+  while (old_mems.is_nonempty()) {
+    Node* old_mem = old_mems.node();
+    uint  old_cnt = old_mems.index();
+    old_mems.pop();
+    assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
+  }
+#endif
 }
 
 bool ConnectionGraph::has_candidates(Compile *C) {
@@ -1381,8 +1505,20 @@
         ptnode_adr(n->_idx)->node_type() == PointsToNode::JavaObject) {
       has_allocations = true;
     }
-    if(n->is_AddP())
-      cg_worklist.append(n->_idx);
+    if(n->is_AddP()) {
+      // Collect address nodes which directly reference an allocation.
+      // Use them during stage 3 below to build initial connection graph
+      // field edges. Other field edges could be added after StoreP/LoadP
+      // nodes are processed during stage 4 below.
+      Node* base = get_addp_base(n);
+      if(base->is_Proj() && base->in(0)->is_Allocate()) {
+        cg_worklist.append(n->_idx);
+      }
+    } else if (n->is_MergeMem()) {
+      // Collect all MergeMem nodes to add memory slices for
+      // scalar replaceable objects in split_unique_types().
+      _mergemem_worklist.append(n->as_MergeMem());
+    }
     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
       Node* m = n->fast_out(i);   // Get user
       worklist_init.push(m);
@@ -1423,12 +1559,13 @@
     }
   }
 
-  VectorSet ptset(Thread::current()->resource_area());
+  Arena* arena = Thread::current()->resource_area();
+  VectorSet ptset(arena);
   GrowableArray<uint>  deferred_edges;
-  VectorSet visited(Thread::current()->resource_area());
+  VectorSet visited(arena);
 
-  // 5. Remove deferred edges from the graph and collect
-  //    information needed for type splitting.
+  // 5. Remove deferred edges from the graph and adjust
+  //    escape state of nonescaping objects.
   cg_length = cg_worklist.length();
   for( uint next = 0; next < cg_length; ++next ) {
     int ni = cg_worklist.at(next);
@@ -1438,98 +1575,9 @@
       remove_deferred(ni, &deferred_edges, &visited);
       Node *n = ptn->_node;
       if (n->is_AddP()) {
-        // Search for objects which are not scalar replaceable.
-        // Mark their escape state as ArgEscape to propagate the state
-        // to referenced objects.
-        // Note: currently there are no difference in compiler optimizations
-        // for ArgEscape objects and NoEscape objects which are not
-        // scalar replaceable.
-
-        int offset = ptn->offset();
-        Node *base = get_addp_base(n);
-        ptset.Clear();
-        PointsTo(ptset, base, igvn);
-        int ptset_size = ptset.Size();
-
-        // Check if a field's initializing value is recorded and add
-        // a corresponding NULL field's value if it is not recorded.
-        // Connection Graph does not record a default initialization by NULL
-        // captured by Initialize node.
-        //
-        // Note: it will disable scalar replacement in some cases:
-        //
-        //    Point p[] = new Point[1];
-        //    p[0] = new Point(); // Will be not scalar replaced
-        //
-        // but it will save us from incorrect optimizations in next cases:
-        //
-        //    Point p[] = new Point[1];
-        //    if ( x ) p[0] = new Point(); // Will be not scalar replaced
-        //
-        // Without a control flow analysis we can't distinguish above cases.
-        //
-        if (offset != Type::OffsetBot && ptset_size == 1) {
-          uint elem = ptset.getelem(); // Allocation node's index
-          // It does not matter if it is not Allocation node since
-          // only non-escaping allocations are scalar replaced.
-          if (ptnode_adr(elem)->_node->is_Allocate() &&
-              ptnode_adr(elem)->escape_state() == PointsToNode::NoEscape) {
-            AllocateNode* alloc = ptnode_adr(elem)->_node->as_Allocate();
-            InitializeNode* ini = alloc->initialization();
-            Node* value = NULL;
-            if (ini != NULL) {
-              BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
-              Node* store = ini->find_captured_store(offset, type2aelembytes(ft), igvn);
-              if (store != NULL && store->is_Store())
-                value = store->in(MemNode::ValueIn);
-            }
-            if (value == NULL || value != ptnode_adr(value->_idx)->_node) {
-              // A field's initializing value was not recorded. Add NULL.
-              uint null_idx = UseCompressedOops ? _noop_null : _oop_null;
-              add_pointsto_edge(ni, null_idx);
-            }
-          }
-        }
-
-        // An object is not scalar replaceable if the field which may point
-        // to it has unknown offset (unknown element of an array of objects).
-        //
-        if (offset == Type::OffsetBot) {
-          uint e_cnt = ptn->edge_count();
-          for (uint ei = 0; ei < e_cnt; ei++) {
-            uint npi = ptn->edge_target(ei);
-            set_escape_state(npi, PointsToNode::ArgEscape);
-            ptnode_adr(npi)->_scalar_replaceable = false;
-          }
-        }
-
-        // Currently an object is not scalar replaceable if a LoadStore node
-        // access its field since the field value is unknown after it.
-        //
-        bool has_LoadStore = false;
-        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
-          Node *use = n->fast_out(i);
-          if (use->is_LoadStore()) {
-            has_LoadStore = true;
-            break;
-          }
-        }
-        // An object is not scalar replaceable if the address points
-        // to unknown field (unknown element for arrays, offset is OffsetBot).
-        //
-        // Or the address may point to more then one object. This may produce
-        // the false positive result (set scalar_replaceable to false)
-        // since the flow-insensitive escape analysis can't separate
-        // the case when stores overwrite the field's value from the case
-        // when stores happened on different control branches.
-        //
-        if (ptset_size > 1 || ptset_size != 0 &&
-            (has_LoadStore || offset == Type::OffsetBot)) {
-          for( VectorSetI j(&ptset); j.test(); ++j ) {
-            set_escape_state(j.elem, PointsToNode::ArgEscape);
-            ptnode_adr(j.elem)->_scalar_replaceable = false;
-          }
-        }
+        // Search for objects which are not scalar replaceable
+        // and adjust their escape state.
+        verify_escape_state(ni, ptset, igvn);
       }
     }
   }
@@ -1646,6 +1694,150 @@
   return has_non_escaping_obj;
 }
 
+// Search for objects which are not scalar replaceable.
+void ConnectionGraph::verify_escape_state(int nidx, VectorSet& ptset, PhaseTransform* phase) {
+  PointsToNode* ptn = ptnode_adr(nidx);
+  Node* n = ptn->_node;
+  assert(n->is_AddP(), "Should be called for AddP nodes only");
+  // Search for objects which are not scalar replaceable.
+  // Mark their escape state as ArgEscape to propagate the state
+  // to referenced objects.
+  // Note: currently there is no difference in compiler optimizations
+  // for ArgEscape objects and NoEscape objects which are not
+  // scalar replaceable.
+
+  Compile* C = _compile;
+
+  int offset = ptn->offset();
+  Node* base = get_addp_base(n);
+  ptset.Clear();
+  PointsTo(ptset, base, phase);
+  int ptset_size = ptset.Size();
+
+  // Check if an oop field's initializing value is recorded and add
+  // a corresponding NULL field's value if it is not recorded.
+  // Connection Graph does not record a default initialization by NULL
+  // captured by Initialize node.
+  //
+  // Note: it will disable scalar replacement in some cases:
+  //
+  //    Point p[] = new Point[1];
+  //    p[0] = new Point(); // Will not be scalar replaced
+  //
+  // but it will save us from incorrect optimizations in next cases:
+  //
+  //    Point p[] = new Point[1];
+  //    if ( x ) p[0] = new Point(); // Will not be scalar replaced
+  //
+  // Do a simple control flow analysis to distinguish above cases.
+  //
+  if (offset != Type::OffsetBot && ptset_size == 1) {
+    uint elem = ptset.getelem(); // Allocation node's index
+    // It does not matter if it is not an Allocation node since
+    // only non-escaping allocations are scalar replaced.
+    if (ptnode_adr(elem)->_node->is_Allocate() &&
+        ptnode_adr(elem)->escape_state() == PointsToNode::NoEscape) {
+      AllocateNode* alloc = ptnode_adr(elem)->_node->as_Allocate();
+      InitializeNode* ini = alloc->initialization();
+
+      // Check only oop fields.
+      const Type* adr_type = n->as_AddP()->bottom_type();
+      BasicType basic_field_type = T_INT;
+      if (adr_type->isa_instptr()) {
+        ciField* field = C->alias_type(adr_type->isa_instptr())->field();
+        if (field != NULL) {
+          basic_field_type = field->layout_type();
+        } else {
+          // Ignore non-field loads (for example, a klass load)
+        }
+      } else if (adr_type->isa_aryptr()) {
+        const Type* elemtype = adr_type->isa_aryptr()->elem();
+        basic_field_type = elemtype->array_element_basic_type();
+      } else {
+        // Raw pointers are used for initializing stores, so skip them.
+        assert(adr_type->isa_rawptr() && base->is_Proj() &&
+               (base->in(0) == alloc),"unexpected pointer type");
+      }
+      if (basic_field_type == T_OBJECT ||
+          basic_field_type == T_NARROWOOP ||
+          basic_field_type == T_ARRAY) {
+        Node* value = NULL;
+        if (ini != NULL) {
+          BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
+          Node* store = ini->find_captured_store(offset, type2aelembytes(ft), phase);
+          if (store != NULL && store->is_Store()) {
+            value = store->in(MemNode::ValueIn);
+          } else if (ptn->edge_count() > 0) { // Are there oop stores?
+            // Check for a store which follows allocation without branches.
+            // For example, a volatile field store is not collected
+            // by Initialize node. TODO: it would be nice to use idom() here.
+            for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+              store = n->fast_out(i);
+              if (store->is_Store() && store->in(0) != NULL) {
+                Node* ctrl = store->in(0);
+                while(!(ctrl == ini || ctrl == alloc || ctrl == NULL ||
+                        ctrl == C->root() || ctrl == C->top() || ctrl->is_Region() ||
+                        ctrl->is_IfTrue() || ctrl->is_IfFalse())) {
+                   ctrl = ctrl->in(0);
+                }
+                if (ctrl == ini || ctrl == alloc) {
+                  value = store->in(MemNode::ValueIn);
+                  break;
+                }
+              }
+            }
+          }
+        }
+        if (value == NULL || value != ptnode_adr(value->_idx)->_node) {
+          // A field's initializing value was not recorded. Add NULL.
+          uint null_idx = UseCompressedOops ? _noop_null : _oop_null;
+          add_pointsto_edge(nidx, null_idx);
+        }
+      }
+    }
+  }
+
+  // An object is not scalar replaceable if the field which may point
+  // to it has unknown offset (unknown element of an array of objects).
+  //
+  if (offset == Type::OffsetBot) {
+    uint e_cnt = ptn->edge_count();
+    for (uint ei = 0; ei < e_cnt; ei++) {
+      uint npi = ptn->edge_target(ei);
+      set_escape_state(npi, PointsToNode::ArgEscape);
+      ptnode_adr(npi)->_scalar_replaceable = false;
+    }
+  }
+
+  // Currently an object is not scalar replaceable if a LoadStore node
+  // access its field since the field value is unknown after it.
+  //
+  bool has_LoadStore = false;
+  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+    Node *use = n->fast_out(i);
+    if (use->is_LoadStore()) {
+      has_LoadStore = true;
+      break;
+    }
+  }
+  // An object is not scalar replaceable if the address points
+  // to unknown field (unknown element for arrays, offset is OffsetBot).
+  //
+  // Or the address may point to more than one object. This may produce
+  // a false positive result (setting scalar_replaceable to false)
+  // since the flow-insensitive escape analysis can't separate
+  // the case when stores overwrite the field's value from the case
+  // when stores happened on different control branches.
+  //
+  if (ptset_size > 1 || (ptset_size != 0 &&
+      (has_LoadStore || offset == Type::OffsetBot))) {
+    for( VectorSetI j(&ptset); j.test(); ++j ) {
+      set_escape_state(j.elem, PointsToNode::ArgEscape);
+      ptnode_adr(j.elem)->_scalar_replaceable = false;
+    }
+  }
+}
+
 void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *phase) {
 
     switch (call->Opcode()) {
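
The two array cases discussed in the comments of verify_escape_state() can be reproduced with a small Java program (an illustrative sketch; the class names are invented):

    public class EscapeDemo {
        static class Point { int x, y; }

        static int localOnly() {
            Point p = new Point(); // never escapes: a scalar-replacement candidate
            p.x = 1; p.y = 2;
            return p.x + p.y;
        }

        static int viaArray(boolean cond) {
            Point[] a = new Point[1];
            // Conditional store into an array element: without the NULL
            // points-to edge added above, EA could wrongly assume a[0] is
            // always the new Point().
            if (cond) a[0] = new Point();
            return (a[0] != null) ? a[0].x : -1;
        }

        public static void main(String[] args) {
            System.out.println(localOnly() + viaArray(false));
        }
    }
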
@@ -1657,6 +1849,7 @@
       assert(false, "should be done already");
       break;
 #endif
+    case Op_CallLeaf:
     case Op_CallLeafNoFP:
     {
       // Stub calls, objects do not escape but they are not scalar replaceable.
@@ -1667,9 +1860,23 @@
         const Type* at = d->field_at(i);
         Node *arg = call->in(i)->uncast();
         const Type *aat = phase->type(arg);
-        if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr()) {
+        if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr() &&
+            ptnode_adr(arg->_idx)->escape_state() < PointsToNode::ArgEscape) {
+
           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                  aat->isa_ptr() != NULL, "expecting an Ptr");
+#ifdef ASSERT
+          if (!((call->Opcode() == Op_CallLeafNoFP &&
+                 call->as_CallLeaf()->_name != NULL &&
+                 strstr(call->as_CallLeaf()->_name, "arraycopy") != 0) ||
+                (call->as_CallLeaf()->_name != NULL &&
+                 (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre")  == 0 ||
+                  strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0)))
+          ) {
+            call->dump();
+            assert(false, "EA: unexpected CallLeaf");
+          }
+#endif
           set_escape_state(arg->_idx, PointsToNode::ArgEscape);
           if (arg->is_AddP()) {
             //
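
The arraycopy names accepted by the new assert are leaf runtime stubs; a typical Java source shape that reaches Op_CallLeafNoFP this way (illustrative only):

    public class CopyDemo {
        public static void main(String[] args) {
            int[] src = {1, 2, 3};
            int[] dst = new int[3];
            // Lowers to an arraycopy leaf stub: both arrays become ArgEscape,
            // i.e. they do not escape the method but are no longer scalar
            // replaceable.
            System.arraycopy(src, 0, dst, 0, 3);
            System.out.println(dst[2]); // 3
        }
    }
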
@@ -1706,9 +1913,10 @@
         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
           const Type* at = d->field_at(i);
           int k = i - TypeFunc::Parms;
+          Node *arg = call->in(i)->uncast();
 
-          if (at->isa_oopptr() != NULL) {
-            Node *arg = call->in(i)->uncast();
+          if (at->isa_oopptr() != NULL &&
+              ptnode_adr(arg->_idx)->escape_state() < PointsToNode::GlobalEscape) {
 
             bool global_escapes = false;
             bool fields_escapes = false;
@@ -1942,20 +2150,23 @@
       record_for_optimizer(n);
       _processed.set(n->_idx);
     } else {
-      // Have to process call's arguments first.
+      // Don't mark as processed since call's arguments have to be processed.
       PointsToNode::NodeType nt = PointsToNode::UnknownType;
+      PointsToNode::EscapeState es = PointsToNode::UnknownEscape;
 
       // Check if a call returns an object.
       const TypeTuple *r = n->as_Call()->tf()->range();
-      if (n->is_CallStaticJava() && r->cnt() > TypeFunc::Parms &&
+      if (r->cnt() > TypeFunc::Parms &&
+          r->field_at(TypeFunc::Parms)->isa_ptr() &&
           n->as_Call()->proj_out(TypeFunc::Parms) != NULL) {
-        // Note:  use isa_ptr() instead of isa_oopptr() here because
-        //        the _multianewarray functions return a TypeRawPtr.
-        if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
-          nt = PointsToNode::JavaObject;
+        nt = PointsToNode::JavaObject;
+        if (!n->is_CallStaticJava()) {
+          // Since the called method is statically unknown, assume
+          // the worst case that the returned value globally escapes.
+          es = PointsToNode::GlobalEscape;
         }
       }
-      add_node(n, nt, PointsToNode::UnknownEscape, false);
+      add_node(n, nt, es, false);
     }
     return;
   }
@@ -2088,18 +2299,27 @@
     }
     case Op_Proj:
     {
-      // we are only interested in the result projection from a call
+      // we are only interested in the oop result projection from a call
       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
-        add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
-        process_call_result(n->as_Proj(), phase);
-        if (!_processed.test(n->_idx)) {
-          // The call's result may need to be processed later if the call
-          // returns it's argument and the argument is not processed yet.
-          _delayed_worklist.push(n);
+        const TypeTuple *r = n->in(0)->as_Call()->tf()->range();
+        assert(r->cnt() > TypeFunc::Parms, "sanity");
+        if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
+          add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
+          int ti = n->in(0)->_idx;
+          // The call may not be registered yet (since not all its inputs are registered)
+          // if this is the projection from the backbranch edge of a Phi.
+          if (ptnode_adr(ti)->node_type() != PointsToNode::UnknownType) {
+            process_call_result(n->as_Proj(), phase);
+          }
+          if (!_processed.test(n->_idx)) {
+            // The call's result may need to be processed later if the call
+            // returns its argument and the argument is not processed yet.
+            _delayed_worklist.push(n);
+          }
+          break;
         }
-      } else {
-        _processed.set(n->_idx);
       }
+      _processed.set(n->_idx);
       break;
     }
     case Op_Return:
@@ -2160,6 +2380,15 @@
       }
       break;
     }
+    case Op_AryEq:
+    case Op_StrComp:
+    case Op_StrEquals:
+    case Op_StrIndexOf:
+    {
+      // char[] arrays passed to string intrinsics are not scalar replaceable.
+      add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
+      break;
+    }
     case Op_ThreadLocal:
     {
       add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true);
@@ -2174,6 +2403,7 @@
 
 void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
   uint n_idx = n->_idx;
+  assert(ptnode_adr(n_idx)->_node != NULL, "node should be registered");
 
   // Don't set processed bit for AddP, LoadP, StoreP since
   // they may need more than one pass to process.
@@ -2211,6 +2441,7 @@
     case Op_DecodeN:
     {
       int ti = n->in(1)->_idx;
+      assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "all nodes should be registered");
       if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
         add_pointsto_edge(n_idx, ti);
       } else {
@@ -2250,7 +2481,6 @@
 #endif
 
       Node* adr = n->in(MemNode::Address)->uncast();
-      const Type *adr_type = phase->type(adr);
       Node* adr_base;
       if (adr->is_AddP()) {
         adr_base = get_addp_base(adr);
@@ -2302,13 +2532,19 @@
     }
     case Op_Proj:
     {
-      // we are only interested in the result projection from a call
+      // we are only interested in the oop result projection from a call
       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
-        process_call_result(n->as_Proj(), phase);
-        assert(_processed.test(n_idx), "all call results should be processed");
-      } else {
-        assert(false, "Op_Proj");
+        assert(ptnode_adr(n->in(0)->_idx)->node_type() != PointsToNode::UnknownType,
+               "all nodes should be registered");
+        const TypeTuple *r = n->in(0)->as_Call()->tf()->range();
+        assert(r->cnt() > TypeFunc::Parms, "sanity");
+        if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
+          process_call_result(n->as_Proj(), phase);
+          assert(_processed.test(n_idx), "all call results should be processed");
+          break;
+        }
       }
+      assert(false, "Op_Proj");
       break;
     }
     case Op_Return:
@@ -2320,6 +2556,7 @@
       }
 #endif
       int ti = n->in(TypeFunc::Parms)->_idx;
+      assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "node should be registered");
       if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
         add_pointsto_edge(n_idx, ti);
       } else {
@@ -2354,14 +2591,38 @@
       }
       break;
     }
+    case Op_AryEq:
+    case Op_StrComp:
+    case Op_StrEquals:
+    case Op_StrIndexOf:
+    {
+      // char[] arrays passed to string intrinsics do not escape but
+      // they are not scalar replaceable. Adjust the escape state for them.
+      // Start from the in(2) edge since in(1) is the memory edge.
+      for (uint i = 2; i < n->req(); i++) {
+        Node* adr = n->in(i)->uncast();
+        const Type *at = phase->type(adr);
+        if (!adr->is_top() && at->isa_ptr()) {
+          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
+                 at->isa_ptr() != NULL, "expecting a Ptr");
+          if (adr->is_AddP()) {
+            adr = get_addp_base(adr);
+          }
+          // Mark as ArgEscape everything "adr" could point to.
+          set_escape_state(adr->_idx, PointsToNode::ArgEscape);
+        }
+      }
+      _processed.set(n_idx);
+      break;
+    }
     case Op_ThreadLocal:
     {
       assert(false, "Op_ThreadLocal");
       break;
     }
     default:
-      ;
-      // nothing to do
+      // This method should be called only for EA-specific nodes.
+      ShouldNotReachHere();
   }
 }
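
The Op_AryEq/Op_StrComp/Op_StrEquals/Op_StrIndexOf handling above can be exercised from Java like this (illustrative; which calls actually intrinsify depends on the platform):

    public class StrIntrinsicDemo {
        public static void main(String[] args) {
            String a = new String(new char[] {'h', 'i'});
            // The char[] backing 'a' is fed to string intrinsics below, so EA
            // marks it ArgEscape: not escaping, but not scalar replaceable.
            System.out.println(a.equals("hi"));    // Op_StrEquals
            System.out.println("hi".compareTo(a)); // Op_StrComp
            System.out.println(a.indexOf('i'));    // Op_StrIndexOf
        }
    }
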
 
--- a/src/share/vm/opto/escape.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/escape.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -210,6 +210,8 @@
   Unique_Node_List  _delayed_worklist; // Nodes to be processed before
                                        // the call build_connection_graph().
 
+  GrowableArray<MergeMemNode *>  _mergemem_worklist; // List of all MergeMem nodes
+
   VectorSet                _processed; // Records which nodes have been
                                        // processed.
 
@@ -289,7 +291,7 @@
   bool split_AddP(Node *addp, Node *base,  PhaseGVN  *igvn);
   PhiNode *create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, PhaseGVN  *igvn, bool &new_created);
   PhiNode *split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, PhaseGVN  *igvn);
-  Node *find_mem(Node *mem, int alias_idx, PhaseGVN  *igvn);
+  void  move_inst_mem(Node* n, GrowableArray<PhiNode *>  &orig_phis, PhaseGVN *igvn);
   Node *find_inst_mem(Node *mem, int alias_idx,GrowableArray<PhiNode *>  &orig_phi_worklist,  PhaseGVN  *igvn);
 
   // Propagate unique types created for unescaped allocated objects
@@ -298,7 +300,6 @@
 
   // manage entries in _node_map
   void  set_map(int idx, Node *n)        { _node_map.map(idx, n); }
-  void  set_map_phi(int idx, PhiNode *p) { _node_map.map(idx, (Node *) p); }
   Node *get_map(int idx)                 { return _node_map[idx]; }
   PhiNode *get_map_phi(int idx) {
     Node *phi = _node_map[idx];
@@ -315,6 +316,9 @@
   // Set the escape state of a node
   void set_escape_state(uint ni, PointsToNode::EscapeState es);
 
+  // Search for objects which are not scalar replaceable.
+  void verify_escape_state(int nidx, VectorSet& ptset, PhaseTransform* phase);
+
 public:
   ConnectionGraph(Compile *C);
 
--- a/src/share/vm/opto/graphKit.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/graphKit.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -455,16 +455,44 @@
     return Bytecodes::_illegal;
 }
 
+void GraphKit::uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
+                                                          bool must_throw) {
+    // if the exception capability is set, then we will generate code
+    // to check the JavaThread.should_post_on_exceptions flag to see
+    // if we actually need to report exception events (for this
+    // thread).  If we don't need to report exception events, we will
+    // take the normal fast path provided by add_exception_events.  If
+    // exception event reporting is enabled for this thread, we will
+    // take the uncommon_trap in the BuildCutout below.
+
+    // first must access the should_post_on_exceptions_flag in this thread's JavaThread
+    Node* jthread = _gvn.transform(new (C, 1) ThreadLocalNode());
+    Node* adr = basic_plus_adr(top(), jthread, in_bytes(JavaThread::should_post_on_exceptions_flag_offset()));
+    Node* should_post_flag = make_load(control(), adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, false);
+
+    // Test the should_post_on_exceptions_flag vs. 0
+    Node* chk = _gvn.transform( new (C, 3) CmpINode(should_post_flag, intcon(0)) );
+    Node* tst = _gvn.transform( new (C, 2) BoolNode(chk, BoolTest::eq) );
+
+    // Branch to slow_path if should_post_on_exceptions_flag was true
+    { BuildCutout unless(this, tst, PROB_MAX);
+      // Do not try anything fancy if we're notifying the VM on every throw.
+      // Cf. case Bytecodes::_athrow in parse2.cpp.
+      uncommon_trap(reason, Deoptimization::Action_none,
+                    (ciKlass*)NULL, (char*)NULL, must_throw);
+    }
+
+}
 //------------------------------builtin_throw----------------------------------
 void GraphKit::builtin_throw(Deoptimization::DeoptReason reason, Node* arg) {
   bool must_throw = true;
 
-  if (env()->jvmti_can_post_exceptions()) {
-    // Do not try anything fancy if we're notifying the VM on every throw.
-    // Cf. case Bytecodes::_athrow in parse2.cpp.
-    uncommon_trap(reason, Deoptimization::Action_none,
-                  (ciKlass*)NULL, (char*)NULL, must_throw);
-    return;
+  if (env()->jvmti_can_post_on_exceptions()) {
+    // check if we must post exception events; take an uncommon trap if so
+    uncommon_trap_if_should_post_on_exceptions(reason, must_throw);
+    // if should_post_on_exceptions turned out to be false,
+    // continue on with the normal codegen
   }
 
   // If this particular condition has not yet happened at this
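
What the new runtime check buys, sketched in Java (illustrative): with a JVMTI agent attached but exception events disabled for the thread, a hot throw/catch like this can now stay in compiled code instead of taking an uncommon trap on every throw:

    public class ThrowDemo {
        public static void main(String[] args) {
            int caught = 0;
            for (int i = 0; i < 1000000; i++) {
                try {
                    if ((i & 1) == 0) throw new IllegalStateException();
                } catch (IllegalStateException e) {
                    caught++;
                }
            }
            System.out.println(caught); // 500000
        }
    }
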
@@ -752,12 +780,20 @@
 
 // Helper function for enforcing certain bytecodes to reexecute if
 // deoptimization happens
-static bool should_reexecute_implied_by_bytecode(JVMState *jvms) {
+static bool should_reexecute_implied_by_bytecode(JVMState *jvms, bool is_anewarray) {
   ciMethod* cur_method = jvms->method();
   int       cur_bci   = jvms->bci();
   if (cur_method != NULL && cur_bci != InvocationEntryBci) {
     Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
-    return Interpreter::bytecode_should_reexecute(code);
+    return Interpreter::bytecode_should_reexecute(code) ||
+           (is_anewarray && code == Bytecodes::_multianewarray);
+    // Reexecute _multianewarray bytecode which was replaced with
+    // sequence of [a]newarray. See Parse::do_multianewarray().
+    //
+    // Note: the interpreter should not have this set, since the
+    // optimization is limited by dimensions and guarded by a flag; in
+    // some cases multianewarray() runtime calls will still be generated
+    // and the bytecode should not be reexecuted (the stack will not be reset).
   } else
     return false;
 }
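Read the new predicate as a disjunction: re-execute either because the interpreter always demands it for this bytecode, or because C2 expanded a _multianewarray into a sequence of [a]newarray allocations. A hedged, self-contained restatement of that decision (the enum and the stand-in function are illustrative, not HotSpot declarations):

    // Sketch of the reexecute decision; Bc and interp_requires_reexecute
    // stand in for Bytecodes::Code and Interpreter::bytecode_should_reexecute.
    enum class Bc { multianewarray, anewarray, other };

    static bool interp_requires_reexecute(Bc /*code*/) { return false; }

    bool should_reexecute(Bc code, bool is_anewarray_alloc) {
      return interp_requires_reexecute(code) ||
             (is_anewarray_alloc && code == Bc::multianewarray);
    }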
@@ -808,7 +844,7 @@
   // For a known set of bytecodes, the interpreter should reexecute them if
   // deoptimization happens. We set the reexecute state for them here
   if (out_jvms->is_reexecute_undefined() && //don't change if already specified
-      should_reexecute_implied_by_bytecode(out_jvms)) {
+      should_reexecute_implied_by_bytecode(out_jvms, call->is_AllocateArray())) {
     out_jvms->set_should_reexecute(true); //NOTE: youngest_jvms not changed
   }
 
@@ -981,14 +1017,19 @@
   case Bytecodes::_invokedynamic:
   case Bytecodes::_invokeinterface:
     {
-      bool is_static = (depth == 0);
       bool ignore;
       ciBytecodeStream iter(method());
       iter.reset_to_bci(bci());
       iter.next();
       ciMethod* method = iter.get_method(ignore);
       inputs = method->arg_size_no_receiver();
-      if (!is_static)  inputs += 1;
+      // Add a receiver argument, maybe:
+      if (code != Bytecodes::_invokestatic &&
+          code != Bytecodes::_invokedynamic)
+        inputs += 1;
+      // (Do not use ciMethod::arg_size(), because
+      // it might be an unloaded method, which doesn't
+      // know whether it is static or not.)
       int size = method->return_type()->size();
       depth = size - inputs;
     }
@@ -1351,8 +1392,8 @@
 }
 
 //------------------------------set_all_memory_call----------------------------
-void GraphKit::set_all_memory_call(Node* call) {
-  Node* newmem = _gvn.transform( new (C, 1) ProjNode(call, TypeFunc::Memory) );
+void GraphKit::set_all_memory_call(Node* call, bool separate_io_proj) {
+  Node* newmem = _gvn.transform( new (C, 1) ProjNode(call, TypeFunc::Memory, separate_io_proj) );
   set_all_memory(newmem);
 }
 
@@ -1573,7 +1614,7 @@
 //---------------------------set_edges_for_java_call---------------------------
 // Connect a newly created call into the current JVMS.
 // A return value node (if any) is returned from set_edges_for_java_call.
-void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw) {
+void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
 
   // Add the predefined inputs:
   call->init_req( TypeFunc::Control, control() );
@@ -1595,13 +1636,13 @@
   // Re-use the current map to produce the result.
 
   set_control(_gvn.transform(new (C, 1) ProjNode(call, TypeFunc::Control)));
-  set_i_o(    _gvn.transform(new (C, 1) ProjNode(call, TypeFunc::I_O    )));
-  set_all_memory_call(xcall);
+  set_i_o(    _gvn.transform(new (C, 1) ProjNode(call, TypeFunc::I_O    , separate_io_proj)));
+  set_all_memory_call(xcall, separate_io_proj);
 
   //return xcall;   // no need, caller already has it
 }
 
-Node* GraphKit::set_results_for_java_call(CallJavaNode* call) {
+Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj) {
   if (stopped())  return top();  // maybe the call folded up?
 
   // Capture the return value, if any.
@@ -1614,8 +1655,15 @@
   // Note:  Since any out-of-line call can produce an exception,
   // we always insert an I_O projection from the call into the result.
 
-  make_slow_call_ex(call, env()->Throwable_klass(), false);
-
+  make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj);
+
+  if (separate_io_proj) {
+    // The caller requested separate projections be used by the fall
+    // through and exceptional paths, so replace the projections for
+    // the fall through path.
+    set_i_o(_gvn.transform( new (C, 1) ProjNode(call, TypeFunc::I_O) ));
+    set_all_memory(_gvn.transform( new (C, 1) ProjNode(call, TypeFunc::Memory) ));
+  }
   return ret;
 }
 
@@ -1678,6 +1726,64 @@
   }
 }
 
+
+// Replace the call with the current state of the kit.
+void GraphKit::replace_call(CallNode* call, Node* result) {
+  JVMState* ejvms = NULL;
+  if (has_exceptions()) {
+    ejvms = transfer_exceptions_into_jvms();
+  }
+
+  SafePointNode* final_state = stop();
+
+  // Find all the needed outputs of this call
+  CallProjections callprojs;
+  call->extract_projections(&callprojs, true);
+
+  // Replace all the old call edges with the edges from the inlining result
+  C->gvn_replace_by(callprojs.fallthrough_catchproj, final_state->in(TypeFunc::Control));
+  C->gvn_replace_by(callprojs.fallthrough_memproj,   final_state->in(TypeFunc::Memory));
+  C->gvn_replace_by(callprojs.fallthrough_ioproj,    final_state->in(TypeFunc::I_O));
+
+  // Replace the result with the new result if it exists and is used
+  if (callprojs.resproj != NULL && result != NULL) {
+    C->gvn_replace_by(callprojs.resproj, result);
+  }
+
+  if (ejvms == NULL) {
+    // No exception edges, so simply kill off those paths
+    C->gvn_replace_by(callprojs.catchall_catchproj, C->top());
+    C->gvn_replace_by(callprojs.catchall_memproj,   C->top());
+    C->gvn_replace_by(callprojs.catchall_ioproj,    C->top());
+
+    // Replace the old exception object with top
+    if (callprojs.exobj != NULL) {
+      C->gvn_replace_by(callprojs.exobj, C->top());
+    }
+  } else {
+    GraphKit ekit(ejvms);
+
+    // Load my combined exception state into the kit, with all phis transformed:
+    SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
+
+    Node* ex_oop = ekit.use_exception_state(ex_map);
+
+    C->gvn_replace_by(callprojs.catchall_catchproj, ekit.control());
+    C->gvn_replace_by(callprojs.catchall_memproj,   ekit.reset_memory());
+    C->gvn_replace_by(callprojs.catchall_ioproj,    ekit.i_o());
+
+    // Replace the old exception object with the newly created one
+    if (callprojs.exobj != NULL) {
+      C->gvn_replace_by(callprojs.exobj, ex_oop);
+    }
+  }
+
+  // Disconnect the call from the graph
+  call->disconnect_inputs(NULL);
+  C->gvn_replace_by(call, C->top());
+}
+
+
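The heart of replace_call is edge redirection: every user of an old call projection is rewired to the corresponding node of the inlined subgraph, after which the call folds to top. The same rewrite step in miniature over a toy node type (ToyNode and replace_by are illustrative stand-ins, not the C2 Node/GVN API):

    #include <algorithm>
    #include <vector>

    // Toy ideal-graph node: just an input edge list; users are found by scanning.
    struct ToyNode { std::vector<ToyNode*> in; };

    // Redirect every edge that points at 'oldn' to 'newn' -- the essence of
    // the C->gvn_replace_by(oldn, newn) calls in replace_call above.
    void replace_by(std::vector<ToyNode*>& graph, ToyNode* oldn, ToyNode* newn) {
      for (ToyNode* n : graph)
        std::replace(n->in.begin(), n->in.end(), oldn, newn);
    }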
 //------------------------------increment_counter------------------------------
 // for statistics: increment a VM counter by 1
 
@@ -3189,9 +3295,10 @@
   if (use_ReduceInitialCardMarks()
       && obj == just_allocated_object(control())) {
     // We can skip marks on a freshly-allocated object in Eden.
-    // Keep this code in sync with maybe_defer_card_mark() in runtime.cpp.
-    // That routine informs GC to take appropriate compensating steps
-    // so as to make this card-mark elision safe.
+    // Keep this code in sync with new_store_pre_barrier() in runtime.cpp.
+    // That routine informs GC to take appropriate compensating steps,
+    // upon a slow-path allocation, so as to make this card-mark
+    // elision safe.
     return;
   }
 
@@ -3459,4 +3566,3 @@
   sync_kit(ideal);
 }
 #undef __
-
--- a/src/share/vm/opto/graphKit.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/graphKit.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -251,6 +251,11 @@
   // via an uncommon trap.
   void builtin_throw(Deoptimization::DeoptReason reason, Node* arg = NULL);
 
+  // Helper to check the JavaThread::_should_post_on_exceptions flag
+  // and branch to an uncommon_trap if it is true (with the specified reason and must_throw)
+  void uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
+                                                  bool must_throw);
+
   // Helper Functions for adding debug information
   void kill_dead_locals();
 #ifdef ASSERT
@@ -279,6 +284,34 @@
   }
   Node* basic_plus_adr(Node* base, Node* ptr, Node* offset);
 
+
+  // Some convenient shortcuts for common nodes
+  Node* IfTrue(IfNode* iff)                   { return _gvn.transform(new (C,1) IfTrueNode(iff));      }
+  Node* IfFalse(IfNode* iff)                  { return _gvn.transform(new (C,1) IfFalseNode(iff));     }
+
+  Node* AddI(Node* l, Node* r)                { return _gvn.transform(new (C,3) AddINode(l, r));       }
+  Node* SubI(Node* l, Node* r)                { return _gvn.transform(new (C,3) SubINode(l, r));       }
+  Node* MulI(Node* l, Node* r)                { return _gvn.transform(new (C,3) MulINode(l, r));       }
+  Node* DivI(Node* ctl, Node* l, Node* r)     { return _gvn.transform(new (C,3) DivINode(ctl, l, r));  }
+
+  Node* AndI(Node* l, Node* r)                { return _gvn.transform(new (C,3) AndINode(l, r));       }
+  Node* OrI(Node* l, Node* r)                 { return _gvn.transform(new (C,3) OrINode(l, r));        }
+  Node* XorI(Node* l, Node* r)                { return _gvn.transform(new (C,3) XorINode(l, r));       }
+
+  Node* MaxI(Node* l, Node* r)                { return _gvn.transform(new (C,3) MaxINode(l, r));       }
+  Node* MinI(Node* l, Node* r)                { return _gvn.transform(new (C,3) MinINode(l, r));       }
+
+  Node* LShiftI(Node* l, Node* r)             { return _gvn.transform(new (C,3) LShiftINode(l, r));    }
+  Node* RShiftI(Node* l, Node* r)             { return _gvn.transform(new (C,3) RShiftINode(l, r));    }
+  Node* URShiftI(Node* l, Node* r)            { return _gvn.transform(new (C,3) URShiftINode(l, r));   }
+
+  Node* CmpI(Node* l, Node* r)                { return _gvn.transform(new (C,3) CmpINode(l, r));       }
+  Node* CmpL(Node* l, Node* r)                { return _gvn.transform(new (C,3) CmpLNode(l, r));       }
+  Node* CmpP(Node* l, Node* r)                { return _gvn.transform(new (C,3) CmpPNode(l, r));       }
+  Node* Bool(Node* cmp, BoolTest::mask relop) { return _gvn.transform(new (C,2) BoolNode(cmp, relop)); }
+
+  Node* AddP(Node* b, Node* a, Node* o)       { return _gvn.transform(new (C,4) AddPNode(b, a, o));    }
+
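These wrappers make ideal-graph construction read like ordinary arithmetic. A hedged usage sketch: the member function below is invented for illustration (it will only compile inside HotSpot), but AddI, RShiftI, CmpI, Bool, intcon and BoolTest::lt are the names declared or used in this patch:

    // Hypothetical GraphKit member using the shortcuts above: build the
    // ideal subgraph for "((lo + hi) >> 1) < limit" and return the Bool node.
    Node* GraphKit::example_mid_lt_limit(Node* lo, Node* hi, Node* limit) {
      Node* sum = AddI(lo, hi);             // AddINode, GVN-transformed
      Node* mid = RShiftI(sum, intcon(1));  // arithmetic shift by constant 1
      Node* cmp = CmpI(mid, limit);         // CmpINode
      return Bool(cmp, BoolTest::lt);       // BoolNode with the "<" relop
    }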
   // Convert between int and long, and size_t.
   // (See macros ConvI2X, etc., in type.hpp for ConvI2X, etc.)
   Node* ConvI2L(Node* offset);
@@ -400,7 +433,7 @@
   void set_all_memory(Node* newmem);
 
   // Create a memory projection from the call, then set_all_memory.
-  void set_all_memory_call(Node* call);
+  void set_all_memory_call(Node* call, bool separate_io_proj = false);
 
   // Create a LoadNode, reading from the parser's memory state.
   // (Note:  require_atomic_access is useful only with T_LONG.)
@@ -543,12 +576,12 @@
   // Transform the call, and update the basics: control, i_o, memory.
   // (The next step is usually to call set_results_for_java_call.)
   void set_edges_for_java_call(CallJavaNode* call,
-                               bool must_throw = false);
+                               bool must_throw = false, bool separate_io_proj = false);
 
   // Finish up a java call that was started by set_edges_for_java_call.
   // Call add_exception on any throw arising from the call.
   // Return the call result (transformed).
-  Node* set_results_for_java_call(CallJavaNode* call);
+  Node* set_results_for_java_call(CallJavaNode* call, bool separate_io_proj = false);
 
   // Similar to set_edges_for_java_call, but simplified for runtime calls.
   void  set_predefined_output_for_runtime_call(Node* call) {
@@ -559,6 +592,11 @@
                                                const TypePtr* hook_mem);
   Node* set_predefined_input_for_runtime_call(SafePointNode* call);
 
+  // Replace the call with the current state of the kit.  Requires
+  // that the call was generated with separate io_projs so that
+  // exceptional control flow can be handled properly.
+  void replace_call(CallNode* call, Node* result);
+
   // helper functions for statistics
   void increment_counter(address counter_addr);   // increment a debug counter
   void increment_counter(Node*   counter_addr);   // increment a debug counter
--- a/src/share/vm/opto/ifg.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/ifg.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/ifnode.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/ifnode.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -531,6 +531,9 @@
   if (linear_only)
     return NULL;
 
+  if( dom->is_Root() )
+    return NULL;
+
   // Else hit a Region.  Check for a loop header
   if( dom->is_Loop() )
     return dom->in(1);          // Skip up thru loops
--- a/src/share/vm/opto/lcm.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/lcm.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -120,6 +120,7 @@
     case Op_LoadRange:
     case Op_LoadD_unaligned:
     case Op_LoadL_unaligned:
+      assert(mach->in(2) == val, "should be address");
       break;
     case Op_StoreB:
     case Op_StoreC:
@@ -146,6 +147,21 @@
     default:                    // Also check for embedded loads
       if( !mach->needs_anti_dependence_check() )
         continue;               // Not an memory op; skip it
+      {
+        // Check that value is used in memory address.
+        Node* base;
+        Node* index;
+        const MachOper* oper = mach->memory_inputs(base, index);
+        if (oper == NULL || oper == (MachOper*)-1) {
+          continue;             // Not a memory op; skip it
+        }
+        if (val == base ||
+            (val == index && val->bottom_type()->isa_narrowoop())) {
+          break;                // Found it
+        } else {
+          continue;             // Skip it
+        }
+      }
       break;
     }
     // check if the offset is not too high for implicit exception
@@ -542,6 +558,16 @@
   // pointers as far as the kill mask goes.
   bool exclude_soe = op == Op_CallRuntime;
 
+  // If the call is a MethodHandle invoke, we need to exclude the
+  // register which is used to save the SP value over MH invokes from
+  // the mask.  Otherwise this register could be used for
+  // deoptimization information.
+  if (op == Op_CallStaticJava) {
+    MachCallStaticJavaNode* mcallstaticjava = (MachCallStaticJavaNode*) mcall;
+    if (mcallstaticjava->_method_handle_invoke)
+      proj->_rout.OR(Matcher::method_handle_invoke_SP_save_mask());
+  }
+
   // Fill in the kill mask for the call
   for( OptoReg::Name r = OptoReg::Name(0); r < _last_Mach_Reg; r=OptoReg::add(r,1) ) {
     if( !regs.Member(r) ) {     // Not already defined by the call
@@ -616,8 +642,9 @@
           assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
         }
       }
-      if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire &&
-          n->req() > TypeFunc::Parms ) {
+      if( n->is_Mach() && n->req() > TypeFunc::Parms &&
+          (n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire ||
+           n->as_Mach()->ideal_Opcode() == Op_MemBarVolatile) ) {
         // MemBarAcquire could be created without Precedent edge.
         // del_req() replaces the specified edge with the last input edge
         // and then removes the last edge. If the specified edge > number of
--- a/src/share/vm/opto/library_call.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/library_call.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -3697,12 +3697,14 @@
 
 // Helper routine for above
 bool LibraryCallKit::is_method_invoke_or_aux_frame(JVMState* jvms) {
+  ciMethod* method = jvms->method();
+
   // Is this the Method.invoke method itself?
-  if (jvms->method()->intrinsic_id() == vmIntrinsics::_invoke)
+  if (method->intrinsic_id() == vmIntrinsics::_invoke)
     return true;
 
   // Is this a helper, defined somewhere underneath MethodAccessorImpl.
-  ciKlass* k = jvms->method()->holder();
+  ciKlass* k = method->holder();
   if (k->is_instance_klass()) {
     ciInstanceKlass* ik = k->as_instance_klass();
     for (; ik != NULL; ik = ik->super()) {
@@ -3712,6 +3714,10 @@
       }
     }
   }
+  else if (method->is_method_handle_adapter()) {
+    // This is an internal adapter frame from the MethodHandleCompiler -- skip it
+    return true;
+  }
 
   return false;
 }
--- a/src/share/vm/opto/loopTransform.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/loopTransform.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -549,6 +549,10 @@
       // Comparing trip+off vs limit
       Node *bol = iff->in(1);
       if( bol->req() != 2 ) continue; // dead constant test
+      if (!bol->is_Bool()) {
+        assert(UseLoopPredicate && bol->Opcode() == Op_Conv2B, "predicate check only");
+        continue;
+      }
       Node *cmp = bol->in(1);
 
       Node *rc_exp = cmp->in(1);
@@ -875,7 +879,7 @@
 //------------------------------is_invariant-----------------------------
 // Return true if n is invariant
 bool IdealLoopTree::is_invariant(Node* n) const {
-  Node *n_c = _phase->get_ctrl(n);
+  Node *n_c = _phase->has_ctrl(n) ? _phase->get_ctrl(n) : n;
   if (n_c->is_top()) return false;
   return !is_member(_phase->get_loop(n_c));
 }
@@ -1594,7 +1598,7 @@
 bool IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ) {
   // Check and remove empty loops (spam micro-benchmarks)
   if( policy_do_remove_empty_loop(phase) )
-    return true;                     // Here we removed an empty loop
+    return true;  // Here we removed an empty loop
 
   bool should_peel = policy_peeling(phase); // Should we peel?
 
@@ -1688,8 +1692,8 @@
     // an even number of trips).  If we are peeling, we might enable some RCE
     // and we'd rather unroll the post-RCE'd loop SO... do not unroll if
     // peeling.
-    if( should_unroll && !should_peel )
-      phase->do_unroll(this,old_new, true);
+    if( should_unroll && !should_peel )
+      phase->do_unroll(this,old_new, true);
 
     // Adjust the pre-loop limits to align the main body
     // iterations.
@@ -1731,9 +1735,9 @@
       _allow_optimizations &&
       !tail()->is_top() ) {     // Also ignore the occasional dead backedge
     if (!_has_call) {
-      if (!iteration_split_impl( phase, old_new )) {
-        return false;
-      }
+      if (!iteration_split_impl( phase, old_new )) {
+        return false;
+      }
     } else if (policy_unswitching(phase)) {
       phase->do_unswitching(this, old_new);
     }
@@ -1746,3 +1750,576 @@
     return false;
   return true;
 }
+
+//-------------------------------is_uncommon_trap_proj----------------------------
+// Return true if proj is the form of "proj->[region->..]call_uct"
+bool PhaseIdealLoop::is_uncommon_trap_proj(ProjNode* proj, bool must_reason_predicate) {
+  int path_limit = 10;
+  assert(proj, "invalid argument");
+  Node* out = proj;
+  for (int ct = 0; ct < path_limit; ct++) {
+    out = out->unique_ctrl_out();
+    if (out == NULL || out->is_Root() || out->is_Start())
+      return false;
+    if (out->is_CallStaticJava()) {
+      int req = out->as_CallStaticJava()->uncommon_trap_request();
+      if (req != 0) {
+        Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(req);
+        if (!must_reason_predicate || reason == Deoptimization::Reason_predicate) {
+          return true;
+        }
+      }
+      return false; // don't search further past a call
+    }
+  }
+  return false;
+}
+
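The walk above chases unique control successors for a bounded number of hops. The same bounded pointer-chase, stripped to its skeleton (ToyNode is an illustrative stand-in for C2's Node; a real check would also inspect the trap reason, as the code above does):

    // Bounded walk down unique successors, as in is_uncommon_trap_proj.
    struct ToyNode {
      ToyNode* unique_out;  // unique control output, or nullptr
      bool is_call, is_root;
    };

    bool reaches_uct_call(ToyNode* proj, int path_limit = 10) {
      ToyNode* out = proj;
      for (int ct = 0; ct < path_limit; ct++) {
        out = out->unique_out;
        if (out == nullptr || out->is_root) return false;
        if (out->is_call) return true;   // stop at the first call either way
      }
      return false;                      // path too long: give up conservatively
    }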
+//-------------------------------is_uncommon_trap_if_pattern-------------------------
+// Return true  for "if(test)-> proj -> ...
+//                          |
+//                          V
+//                      other_proj->[region->..]call_uct"
+//
+// "must_reason_predicate" means the uct reason must be Reason_predicate
+bool PhaseIdealLoop::is_uncommon_trap_if_pattern(ProjNode *proj, bool must_reason_predicate) {
+  Node *in0 = proj->in(0);
+  if (!in0->is_If()) return false;
+  IfNode* iff = in0->as_If();
+
+  // We need the "If(Conv2B(Opaque1(...)))" pattern for must_reason_predicate
+  if (must_reason_predicate) {
+    if (iff->in(1)->Opcode() != Op_Conv2B ||
+       iff->in(1)->in(1)->Opcode() != Op_Opaque1) {
+      return false;
+    }
+  }
+
+  ProjNode* other_proj = iff->proj_out(1-proj->_con)->as_Proj();
+  return is_uncommon_trap_proj(other_proj, must_reason_predicate);
+}
+
+//------------------------------create_new_if_for_predicate------------------------
+// create a new if above the uct_if_pattern for the predicate to be promoted.
+//
+//          before                                after
+//        ----------                           ----------
+//           ctrl                                 ctrl
+//            |                                     |
+//            |                                     |
+//            v                                     v
+//           iff                                 new_iff
+//          /    \                                /      \
+//         /      \                              /        \
+//        v        v                            v          v
+//  uncommon_proj cont_proj                   if_uct     if_cont
+// \      |        |                           |          |
+//  \     |        |                           |          |
+//   v    v        v                           |          v
+//     rgn       loop                          |         iff
+//      |                                      |        /     \
+//      |                                      |       /       \
+//      v                                      |      v         v
+// uncommon_trap                               | uncommon_proj cont_proj
+//                                           \  \    |           |
+//                                            \  \   |           |
+//                                             v  v  v           v
+//                                               rgn           loop
+//                                                |
+//                                                |
+//                                                v
+//                                           uncommon_trap
+//
+//
+// We will create a region to guard the uct call if there is not one already.
+// The true projection (if_cont) of the new_iff is returned.
+ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj) {
+  assert(is_uncommon_trap_if_pattern(cont_proj, true), "must be a uct if pattern!");
+  IfNode* iff = cont_proj->in(0)->as_If();
+
+  ProjNode *uncommon_proj = iff->proj_out(1 - cont_proj->_con);
+  Node     *rgn   = uncommon_proj->unique_ctrl_out();
+  assert(rgn->is_Region() || rgn->is_Call(), "must be a region or call uct");
+
+  if (!rgn->is_Region()) { // create a region to guard the call
+    assert(rgn->is_Call(), "must be call uct");
+    CallNode* call = rgn->as_Call();
+    rgn = new (C, 1) RegionNode(1);
+    _igvn.set_type(rgn, rgn->bottom_type());
+    rgn->add_req(uncommon_proj);
+    set_idom(rgn, idom(uncommon_proj), dom_depth(uncommon_proj)+1);
+    _igvn.hash_delete(call);
+    call->set_req(0, rgn);
+  }
+
+  // Create new_iff
+  uint  iffdd  = dom_depth(iff);
+  IdealLoopTree* lp = get_loop(iff);
+  IfNode *new_iff = new (C, 2) IfNode(iff->in(0), NULL, iff->_prob, iff->_fcnt);
+  register_node(new_iff, lp, idom(iff), iffdd);
+  Node *if_cont = new (C, 1) IfTrueNode(new_iff);
+  Node *if_uct  = new (C, 1) IfFalseNode(new_iff);
+  if (cont_proj->is_IfFalse()) {
+    // Swap
+    Node* tmp = if_uct; if_uct = if_cont; if_cont = tmp;
+  }
+  register_node(if_cont, lp, new_iff, iffdd);
+  register_node(if_uct, get_loop(rgn), new_iff, iffdd);
+
+  // if_cont to iff
+  _igvn.hash_delete(iff);
+  iff->set_req(0, if_cont);
+  set_idom(iff, if_cont, dom_depth(iff));
+
+  // if_uct to rgn
+  _igvn.hash_delete(rgn);
+  rgn->add_req(if_uct);
+  Node* ridom = idom(rgn);
+  Node* nrdom = dom_lca(ridom, new_iff);
+  set_idom(rgn, nrdom, dom_depth(rgn));
+
+  // rgn must have no phis
+  assert(!rgn->as_Region()->has_phi(), "region must have no phis");
+
+  return if_cont->as_Proj();
+}
+
+//------------------------------find_predicate_insertion_point--------------------------
+// Find a good location to insert a predicate
+ProjNode* PhaseIdealLoop::find_predicate_insertion_point(Node* start_c) {
+  if (start_c == C->root() || !start_c->is_Proj())
+    return NULL;
+  if (is_uncommon_trap_if_pattern(start_c->as_Proj(), true/*Reason_Predicate*/)) {
+    return start_c->as_Proj();
+  }
+  return NULL;
+}
+
+//------------------------------Invariance-----------------------------------
+// Helper class for loop_predication_impl to compute invariance on the fly and
+// clone invariants.
+class Invariance : public StackObj {
+  VectorSet _visited, _invariant;
+  Node_Stack _stack;
+  VectorSet _clone_visited;
+  Node_List _old_new; // map of old to new (clone)
+  IdealLoopTree* _lpt;
+  PhaseIdealLoop* _phase;
+
+  // Helper function to set up a node for the invariance computation.
+  // If n is a known invariant, mark it directly. Otherwise, check
+  // whether n can be pushed onto the stack for further processing.
+  void visit(Node* use, Node* n) {
+    if (_lpt->is_invariant(n)) { // known invariant
+      _invariant.set(n->_idx);
+    } else if (!n->is_CFG()) {
+      Node *n_ctrl = _phase->ctrl_or_self(n);
+      Node *u_ctrl = _phase->ctrl_or_self(use); // self if use is a CFG
+      if (_phase->is_dominator(n_ctrl, u_ctrl)) {
+        _stack.push(n, n->in(0) == NULL ? 1 : 0);
+      }
+    }
+  }
+
+  // Compute invariance for "the_node" and (possibly) all its inputs recursively
+  // on the fly
+  void compute_invariance(Node* n) {
+    assert(_visited.test(n->_idx), "must be");
+    visit(n, n);
+    while (_stack.is_nonempty()) {
+      Node*  n = _stack.node();
+      uint idx = _stack.index();
+      if (idx == n->req()) { // all inputs are processed
+        _stack.pop();
+        // n is invariant if its inputs are all invariant
+        bool all_inputs_invariant = true;
+        for (uint i = 0; i < n->req(); i++) {
+          Node* in = n->in(i);
+          if (in == NULL) continue;
+          assert(_visited.test(in->_idx), "must have visited input");
+          if (!_invariant.test(in->_idx)) { // bad guy
+            all_inputs_invariant = false;
+            break;
+          }
+        }
+        if (all_inputs_invariant) {
+          _invariant.set(n->_idx); // I am an invariant too
+        }
+      } else { // process next input
+        _stack.set_index(idx + 1);
+        Node* m = n->in(idx);
+        if (m != NULL && !_visited.test_set(m->_idx)) {
+          visit(n, m);
+        }
+      }
+    }
+  }
+
+  // Helper function to set up _old_new map for clone_nodes.
+  // If n is a known invariant, set up directly ("clone" of n == n).
+  // Otherwise, push n onto the stack for real cloning.
+  void clone_visit(Node* n) {
+    assert(_invariant.test(n->_idx), "must be invariant");
+    if (_lpt->is_invariant(n)) { // known invariant
+      _old_new.map(n->_idx, n);
+    } else { // to be cloned
+      assert(!n->is_CFG(), "should not see CFG here");
+      _stack.push(n, n->in(0) == NULL ? 1 : 0);
+    }
+  }
+
+  // Clone "n" and (possibly) all its inputs recursively
+  void clone_nodes(Node* n, Node* ctrl) {
+    clone_visit(n);
+    while (_stack.is_nonempty()) {
+      Node*  n = _stack.node();
+      uint idx = _stack.index();
+      if (idx == n->req()) { // all inputs processed, clone n!
+        _stack.pop();
+        // clone invariant node
+        Node* n_cl = n->clone();
+        _old_new.map(n->_idx, n_cl);
+        _phase->register_new_node(n_cl, ctrl);
+        for (uint i = 0; i < n->req(); i++) {
+          Node* in = n_cl->in(i);
+          if (in == NULL) continue;
+          n_cl->set_req(i, _old_new[in->_idx]);
+        }
+      } else { // process next input
+        _stack.set_index(idx + 1);
+        Node* m = n->in(idx);
+        if (m != NULL && !_clone_visited.test_set(m->_idx)) {
+          clone_visit(m); // visit the input
+        }
+      }
+    }
+  }
+
+ public:
+  Invariance(Arena* area, IdealLoopTree* lpt) :
+    _lpt(lpt), _phase(lpt->_phase),
+    _visited(area), _invariant(area), _stack(area, 10 /* guess */),
+    _clone_visited(area), _old_new(area)
+  {}
+
+  // Map old to n for invariance computation and clone
+  void map_ctrl(Node* old, Node* n) {
+    assert(old->is_CFG() && n->is_CFG(), "must be");
+    _old_new.map(old->_idx, n); // "clone" of old is n
+    _invariant.set(old->_idx);  // old is invariant
+    _clone_visited.set(old->_idx);
+  }
+
+  // Driver function to compute invariance
+  bool is_invariant(Node* n) {
+    if (!_visited.test_set(n->_idx))
+      compute_invariance(n);
+    return (_invariant.test(n->_idx) != 0);
+  }
+
+  // Driver function to clone invariant
+  Node* clone(Node* n, Node* ctrl) {
+    assert(ctrl->is_CFG(), "must be");
+    assert(_invariant.test(n->_idx), "must be an invariant");
+    if (!_clone_visited.test(n->_idx))
+      clone_nodes(n, ctrl);
+    return _old_new[n->_idx];
+  }
+};
+
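Both compute_invariance and clone_nodes above use the same iterative post-order idiom: a stack of (node, next-input-index) pairs replaces recursion, so deep expression trees cannot overflow the C stack. A runnable miniature of that idiom (ToyNode and the 'known' flag are illustrative; the real code additionally handles control inputs and dominance):

    #include <cstddef>
    #include <utility>
    #include <vector>

    struct ToyNode {
      std::vector<ToyNode*> in;   // inputs (entries may be nullptr)
      bool known = false;         // leaf already known to be invariant
      bool visited = false, invariant = false;
    };

    // Post-order over the input DAG with an explicit (node, index) stack.
    bool compute_invariant(ToyNode* root) {
      std::vector<std::pair<ToyNode*, size_t>> stack;
      root->visited = true;
      stack.push_back({root, 0});
      while (!stack.empty()) {
        ToyNode* n  = stack.back().first;
        size_t  idx = stack.back().second;
        if (idx == n->in.size()) {      // all inputs processed: decide for n
          bool all = true;
          for (ToyNode* m : n->in)
            if (m != nullptr && !m->invariant) { all = false; break; }
          // Leaves count only if already known; interior nodes inherit.
          n->invariant = n->known || (!n->in.empty() && all);
          stack.pop_back();
        } else {                        // advance to the next input
          stack.back().second = idx + 1;
          ToyNode* m = n->in[idx];
          if (m != nullptr && !m->visited) {
            m->visited = true;
            stack.push_back({m, 0});
          }
        }
      }
      return root->invariant;
    }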
+//------------------------------is_range_check_if -----------------------------------
+// Returns true if the predicate of iff is in "scale*iv + offset u< load_range(ptr)" format
+// Note: this function is specifically designed for loop predication. We require load_range
+//       and offset to be loop invariant, computed on the fly by "invar".
+bool IdealLoopTree::is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invariance& invar) const {
+  if (!is_loop_exit(iff)) {
+    return false;
+  }
+  if (!iff->in(1)->is_Bool()) {
+    return false;
+  }
+  const BoolNode *bol = iff->in(1)->as_Bool();
+  if (bol->_test._test != BoolTest::lt) {
+    return false;
+  }
+  if (!bol->in(1)->is_Cmp()) {
+    return false;
+  }
+  const CmpNode *cmp = bol->in(1)->as_Cmp();
+  if (cmp->Opcode() != Op_CmpU) {
+    return false;
+  }
+  if (cmp->in(2)->Opcode() != Op_LoadRange) {
+    return false;
+  }
+  LoadRangeNode* lr = (LoadRangeNode*)cmp->in(2);
+  if (!invar.is_invariant(lr)) { // loadRange must be invariant
+    return false;
+  }
+  Node *iv     = _head->as_CountedLoop()->phi();
+  int   scale  = 0;
+  Node *offset = NULL;
+  if (!phase->is_scaled_iv_plus_offset(cmp->in(1), iv, &scale, &offset)) {
+    return false;
+  }
+  if (offset && !invar.is_invariant(offset)) { // offset must be invariant
+    return false;
+  }
+  return true;
+}
+
+//------------------------------rc_predicate-----------------------------------
+// Create a range check predicate
+//
+// for (i = init; i < limit; i += stride) {
+//    a[scale*i+offset]
+// }
+//
+// Compute max(scale*i + offset) for init <= i < limit and build the predicate
+// as "max(scale*i + offset) u< a.length".
+//
+// There are two cases for max(scale*i + offset):
+// (1) stride*scale > 0
+//   max(scale*i + offset) = scale*(limit-stride) + offset
+// (2) stride*scale < 0
+//   max(scale*i + offset) = scale*init + offset
+BoolNode* PhaseIdealLoop::rc_predicate(Node* ctrl,
+                                       int scale, Node* offset,
+                                       Node* init, Node* limit, Node* stride,
+                                       Node* range) {
+  Node* max_idx_expr  = init;
+  int stride_con = stride->get_int();
+  if ((stride_con > 0) == (scale > 0)) {
+    max_idx_expr = new (C, 3) SubINode(limit, stride);
+    register_new_node(max_idx_expr, ctrl);
+  }
+
+  if (scale != 1) {
+    ConNode* con_scale = _igvn.intcon(scale);
+    max_idx_expr = new (C, 3) MulINode(max_idx_expr, con_scale);
+    register_new_node(max_idx_expr, ctrl);
+  }
+
+  if (offset && (!offset->is_Con() || offset->get_int() != 0)) {
+    max_idx_expr = new (C, 3) AddINode(max_idx_expr, offset);
+    register_new_node(max_idx_expr, ctrl);
+  }
+
+  CmpUNode* cmp = new (C, 3) CmpUNode(max_idx_expr, range);
+  register_new_node(cmp, ctrl);
+  BoolNode* bol = new (C, 2) BoolNode(cmp, BoolTest::lt);
+  register_new_node(bol, ctrl);
+  return bol;
+}
+
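The two closed forms in the comment above are easy to sanity-check by brute force. A small, runnable harness (plain C++, not HotSpot code; the examples use stride == 1 so the last iteration is exactly limit - stride, which is what the case (1) formula assumes):

    #include <algorithm>
    #include <cassert>

    // Brute-force max of scale*i + offset over init <= i < limit (stride > 0),
    // checked against the closed forms used by rc_predicate above.
    int max_index(int init, int limit, int stride, int scale, int offset) {
      int best = scale * init + offset;
      for (int i = init; i < limit; i += stride)
        best = std::max(best, scale * i + offset);
      return best;
    }

    int main() {
      // Case (1): stride*scale > 0 -> max at i = limit - stride
      assert(max_index(0, 10, 1, 2, 3) == 2 * (10 - 1) + 3);
      // Case (2): stride*scale < 0 -> max at i = init
      assert(max_index(0, 10, 1, -2, 3) == -2 * 0 + 3);
      return 0;
    }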
+//------------------------------ loop_predication_impl--------------------------
+// Insert loop predicates for null checks and range checks
+bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
+  if (!UseLoopPredicate) return false;
+
+  // Too many traps seen?
+  bool tmt = C->too_many_traps(C->method(), 0, Deoptimization::Reason_predicate);
+  int tc = C->trap_count(Deoptimization::Reason_predicate);
+  if (tmt || tc > 0) {
+    if (TraceLoopPredicate) {
+      tty->print_cr("too many predicate traps: %d", tc);
+      C->method()->print(); // which method has too many predicate traps
+      tty->print_cr("");
+    }
+    return false;
+  }
+
+  CountedLoopNode *cl = NULL;
+  if (loop->_head->is_CountedLoop()) {
+    cl = loop->_head->as_CountedLoop();
+    // Do nothing for iteration-split loops
+    if (!cl->is_normal_loop()) return false;
+  }
+
+  LoopNode *lpn  = loop->_head->as_Loop();
+  Node* entry = lpn->in(LoopNode::EntryControl);
+
+  ProjNode *predicate_proj = find_predicate_insertion_point(entry);
+  if (!predicate_proj) {
+#ifndef PRODUCT
+    if (TraceLoopPredicate) {
+      tty->print("missing predicate:");
+      loop->dump_head();
+    }
+#endif
+    return false;
+  }
+
+  ConNode* zero = _igvn.intcon(0);
+  set_ctrl(zero, C->root());
+  Node *cond_false = new (C, 2) Conv2BNode(zero);
+  register_new_node(cond_false, C->root());
+  ConNode* one = _igvn.intcon(1);
+  set_ctrl(one, C->root());
+  Node *cond_true = new (C, 2) Conv2BNode(one);
+  register_new_node(cond_true, C->root());
+
+  ResourceArea *area = Thread::current()->resource_area();
+  Invariance invar(area, loop);
+
+  // Create list of if-projs such that a newer proj dominates all older
+  // projs in the list, and they all dominate loop->tail()
+  Node_List if_proj_list(area);
+  LoopNode *head  = loop->_head->as_Loop();
+  Node *current_proj = loop->tail(); // start from tail
+  while ( current_proj != head ) {
+    if (loop == get_loop(current_proj) && // still in the loop ?
+        current_proj->is_Proj()        && // is a projection  ?
+        current_proj->in(0)->Opcode() == Op_If) { // is an If projection?
+      if_proj_list.push(current_proj);
+    }
+    current_proj = idom(current_proj);
+  }
+
+  bool hoisted = false; // true if at least one proj is promoted
+  while (if_proj_list.size() > 0) {
+    // Following are changed to nonnull when a predicate can be hoisted
+    ProjNode* new_predicate_proj = NULL;
+    BoolNode* new_predicate_bol  = NULL;
+
+    ProjNode* proj = if_proj_list.pop()->as_Proj();
+    IfNode*   iff  = proj->in(0)->as_If();
+
+    if (!is_uncommon_trap_if_pattern(proj)) {
+      if (loop->is_loop_exit(iff)) {
+        // Stop processing the remaining projs in the list because their execution
+        // depends on the condition of "iff" (iff->in(1)).
+        break;
+      } else {
+        // Both arms are inside the loop. There are two cases:
+        // (1) there is one backward branch. In this case, any remaining proj
+        //     in the if_proj list post-dominates "iff". So, the condition of "iff"
+        //     does not directly determine the execution of the remaining projs, and we
+        //     can safely continue.
+        // (2) both arms are forward branches, i.e. a diamond shape. In this case, "proj"
+        //     does not dominate loop->tail(), so it can not be in the if_proj list.
+        continue;
+      }
+    }
+
+    Node*     test = iff->in(1);
+    if (!test->is_Bool()) { // Conv2B, ...
+      continue;
+    }
+    BoolNode* bol = test->as_Bool();
+    if (invar.is_invariant(bol)) {
+      // Invariant test
+      new_predicate_proj = create_new_if_for_predicate(predicate_proj);
+      Node* ctrl = new_predicate_proj->in(0)->as_If()->in(0);
+      new_predicate_bol  = invar.clone(bol, ctrl)->as_Bool();
+      if (TraceLoopPredicate) tty->print("invariant");
+    } else if (cl != NULL && loop->is_range_check_if(iff, this, invar)) {
+      // Range check (only for counted loops)
+      new_predicate_proj = create_new_if_for_predicate(predicate_proj);
+      Node *ctrl = new_predicate_proj->in(0)->as_If()->in(0);
+      const Node*    cmp    = bol->in(1)->as_Cmp();
+      Node*          idx    = cmp->in(1);
+      assert(!invar.is_invariant(idx), "index is variant");
+      assert(cmp->in(2)->Opcode() == Op_LoadRange, "must be");
+      LoadRangeNode* ld_rng = (LoadRangeNode*)cmp->in(2); // LoadRangeNode
+      assert(invar.is_invariant(ld_rng), "load range must be invariant");
+      ld_rng = (LoadRangeNode*)invar.clone(ld_rng, ctrl);
+      int scale    = 1;
+      Node* offset = zero;
+      bool ok = is_scaled_iv_plus_offset(idx, cl->phi(), &scale, &offset);
+      assert(ok, "must be index expression");
+      if (offset && offset != zero) {
+        assert(invar.is_invariant(offset), "offset must be loop invariant");
+        offset = invar.clone(offset, ctrl);
+      }
+      Node* init    = cl->init_trip();
+      Node* limit   = cl->limit();
+      Node* stride  = cl->stride();
+      new_predicate_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, ld_rng);
+      if (TraceLoopPredicate) tty->print("range check");
+    }
+
+    if (new_predicate_proj == NULL) {
+      // The other proj of the "iff" is an uncommon trap projection, and we can assume
+      // the other proj will not be executed ("executed" means uct raised).
+      continue;
+    } else {
+      // Success - attach condition (new_predicate_bol) to predicate if
+      invar.map_ctrl(proj, new_predicate_proj); // so that subsequent invariance tests use the new control
+      IfNode* new_iff = new_predicate_proj->in(0)->as_If();
+
+      // Negate test if necessary
+      if (proj->_con != predicate_proj->_con) {
+        new_predicate_bol = new (C, 2) BoolNode(new_predicate_bol->in(1), new_predicate_bol->_test.negate());
+        register_new_node(new_predicate_bol, new_iff->in(0));
+        if (TraceLoopPredicate) tty->print_cr(" if negated: %d", iff->_idx);
+      } else {
+        if (TraceLoopPredicate) tty->print_cr(" if: %d", iff->_idx);
+      }
+
+      _igvn.hash_delete(new_iff);
+      new_iff->set_req(1, new_predicate_bol);
+
+      _igvn.hash_delete(iff);
+      iff->set_req(1, proj->is_IfFalse() ? cond_false : cond_true);
+
+      Node* ctrl = new_predicate_proj; // new control
+      ProjNode* dp = proj;     // old control
+      assert(get_loop(dp) == loop, "guaranteed at the time of collecting proj");
+      // Find nodes (which depend only on the test) off the surviving projection;
+      // move them outside the loop with the control of proj_clone
+      for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
+        Node* cd = dp->fast_out(i); // Control-dependent node
+        if (cd->depends_only_on_test()) {
+          assert(cd->in(0) == dp, "");
+          _igvn.hash_delete(cd);
+          cd->set_req(0, ctrl); // ctrl, not NULL
+          set_early_ctrl(cd);
+          _igvn._worklist.push(cd);
+          IdealLoopTree *new_loop = get_loop(get_ctrl(cd));
+          if (new_loop != loop) {
+            if (!loop->_child) loop->_body.yank(cd);
+            if (!new_loop->_child ) new_loop->_body.push(cd);
+          }
+          --i;
+          --imax;
+        }
+      }
+
+      hoisted = true;
+      C->set_major_progress();
+    }
+  } // end while
+
+#ifndef PRODUCT
+  // Report that the loop predication was actually performed
+  // for this loop
+  if (TraceLoopPredicate && hoisted) {
+    tty->print("Loop Predication Performed:");
+    loop->dump_head();
+  }
+#endif
+
+  return hoisted;
+}
+
+//------------------------------loop_predication--------------------------------
+// Driver routine for the loop predication optimization
+bool IdealLoopTree::loop_predication(PhaseIdealLoop *phase) {
+  bool hoisted = false;
+  // Recursively promote predicates
+  if (_child) {
+    hoisted = _child->loop_predication(phase);
+  }
+
+  // self
+  if (!_irreducible && !tail()->is_top()) {
+    hoisted |= phase->loop_predication_impl(this);
+  }
+
+  if (_next) { // sibling
+    hoisted |= _next->loop_predication(phase);
+  }
+
+  return hoisted;
+}
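IdealLoopTree is stored in first-child/next-sibling form, and the driver above therefore recurses on _child, handles itself, then recurses on _next. The traversal in isolation (ToyLoop and the callback are illustrative):

    // First-child/next-sibling traversal, as in loop_predication above.
    struct ToyLoop { ToyLoop* child = nullptr; ToyLoop* next = nullptr; };

    template <typename F>
    bool visit_loops(ToyLoop* l, F apply) {  // true if any apply() made progress
      bool progress = false;
      if (l->child) progress |= visit_loops(l->child, apply);  // children first
      progress |= apply(l);                                    // then self
      if (l->next)  progress |= visit_loops(l->next, apply);   // then siblings
      return progress;
    }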
--- a/src/share/vm/opto/loopnode.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/loopnode.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1279,7 +1279,8 @@
     // Visit all children, looking for Phis
     for (DUIterator i = cl->outs(); cl->has_out(i); i++) {
       Node *out = cl->out(i);
-      if (!out->is_Phi() || out == phi)  continue; // Looking for other phis
+      // Look for other phis (secondary IVs). Skip dead ones
+      if (!out->is_Phi() || out == phi || !phase->has_node(out)) continue;
       PhiNode* phi2 = out->as_Phi();
       Node *incr2 = phi2->in( LoopNode::LoopBackControl );
       // Look for induction variables of the form:  X += constant
@@ -1419,11 +1420,57 @@
   }
 }
 
+//---------------------collect_potentially_useful_predicates-----------------------
+// Helper function to collect potentially useful predicates to prevent them from
+// being eliminated by PhaseIdealLoop::eliminate_useless_predicates
+void PhaseIdealLoop::collect_potentially_useful_predicates(
+                         IdealLoopTree * loop, Unique_Node_List &useful_predicates) {
+  if (loop->_child) { // child
+    collect_potentially_useful_predicates(loop->_child, useful_predicates);
+  }
+
+  // self (only loops to which loop predication can be applied may use their predicates)
+  if (loop->_head->is_Loop()     &&
+      !loop->_irreducible        &&
+      !loop->tail()->is_top()) {
+    LoopNode *lpn  = loop->_head->as_Loop();
+    Node* entry = lpn->in(LoopNode::EntryControl);
+    ProjNode *predicate_proj = find_predicate_insertion_point(entry);
+    if (predicate_proj != NULL) { // right pattern that can be used by loop predication
+      assert(entry->in(0)->in(1)->in(1)->Opcode() == Op_Opaque1, "must be");
+      useful_predicates.push(entry->in(0)->in(1)->in(1)); // good one
+    }
+  }
+
+  if (loop->_next) { // sibling
+    collect_potentially_useful_predicates(loop->_next, useful_predicates);
+  }
+}
+
+//------------------------eliminate_useless_predicates-----------------------------
+// Eliminate all inserted predicates if they could not be used by loop predication.
+void PhaseIdealLoop::eliminate_useless_predicates() {
+  if (C->predicate_count() == 0) return; // no predicate left
+
+  Unique_Node_List useful_predicates; // to store useful predicates
+  if (C->has_loops()) {
+    collect_potentially_useful_predicates(_ltree_root->_child, useful_predicates);
+  }
+
+  for (int i = C->predicate_count(); i > 0; i--) {
+    Node* n = C->predicate_opaque1_node(i-1);
+    assert(n->Opcode() == Op_Opaque1, "must be");
+    if (!useful_predicates.member(n)) { // not in the useful list
+      _igvn.replace_node(n, n->in(1));
+    }
+  }
+}
+
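The elimination step is a simple set-difference over the compile-wide predicate list: anything not proven useful is replaced by its input, i.e. turned into an identity that later IGVN folds away. A miniature of that loop (toy types; the replace callback stands in for _igvn.replace_node):

    #include <cstddef>
    #include <unordered_set>
    #include <vector>

    struct ToyNode { ToyNode* in1; };  // Opaque1-like node with one input

    // Replace every recorded predicate node that is not in the useful set
    // by its input, mirroring eliminate_useless_predicates above.
    void eliminate_useless(std::vector<ToyNode*>& predicates,
                           const std::unordered_set<ToyNode*>& useful,
                           void (*replace)(ToyNode*, ToyNode*)) {
      for (size_t i = predicates.size(); i > 0; i--) {
        ToyNode* n = predicates[i - 1];
        if (useful.count(n) == 0)
          replace(n, n->in1);          // like _igvn.replace_node(n, n->in(1))
      }
    }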
 //=============================================================================
 //----------------------------build_and_optimize-------------------------------
 // Create a PhaseLoop.  Build the ideal Loop tree.  Map each Ideal Node to
 // its corresponding LoopNode.  If 'optimize' is true, do some loop cleanups.
-void PhaseIdealLoop::build_and_optimize(bool do_split_ifs) {
+void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool do_loop_pred) {
   int old_progress = C->major_progress();
 
   // Reset major-progress flag for the driver's heuristics
@@ -1576,6 +1623,12 @@
     return;
   }
 
+  // Some parser-inserted loop predicates could never be used by loop
+  // predication. Eliminate them before loop optimization.
+  if (UseLoopPredicate) {
+    eliminate_useless_predicates();
+  }
+
   // clear out the dead code
   while(_deadlist.size()) {
     _igvn.remove_globally_dead_node(_deadlist.pop());
@@ -1602,7 +1655,7 @@
       // Because RCE opportunities can be masked by split_thru_phi,
       // look for RCE candidates and inhibit split_thru_phi
       // on just their loop-phi's for this pass of loop opts
-      if( SplitIfBlocks && do_split_ifs ) {
+      if (SplitIfBlocks && do_split_ifs) {
         if (lpt->policy_range_check(this)) {
           lpt->_rce_candidate = 1; // = true
         }
@@ -1618,12 +1671,17 @@
     NOT_PRODUCT( if( VerifyLoopOptimizations ) verify(); );
   }
 
+  // Perform loop predication before iteration splitting
+  if (do_loop_pred && C->has_loops() && !C->major_progress()) {
+    _ltree_root->_child->loop_predication(this);
+  }
+
   // Perform iteration-splitting on inner loops.  Split iterations to avoid
   // range checks or one-shot null checks.
 
   // If split-if's didn't hack the graph too bad (no CFG changes)
   // then do loop opts.
-  if( C->has_loops() && !C->major_progress() ) {
+  if (C->has_loops() && !C->major_progress()) {
     memset( worklist.adr(), 0, worklist.Size()*sizeof(Node*) );
     _ltree_root->_child->iteration_split( this, worklist );
     // No verify after peeling!  GCM has hoisted code out of the loop.
@@ -1635,7 +1693,7 @@
   // Do verify graph edges in any case
   NOT_PRODUCT( C->verify_graph_edges(); );
 
-  if( !do_split_ifs ) {
+  if (!do_split_ifs) {
     // We saw major progress in Split-If to get here.  We forced a
     // pass with unrolling and not split-if, however more split-if's
     // might make progress.  If the unrolling didn't make progress
@@ -2762,6 +2820,22 @@
   Node *legal = LCA;            // Walk 'legal' up the IDOM chain
   Node *least = legal;          // Best legal position so far
   while( early != legal ) {     // While not at earliest legal
+#ifdef ASSERT
+    if (legal->is_Start() && !early->is_Root()) {
+      // Bad graph. Print idom path and fail.
+      tty->print_cr( "Bad graph detected in build_loop_late");
+      tty->print("n: ");n->dump(); tty->cr();
+      tty->print("early: ");early->dump(); tty->cr();
+      int ct = 0;
+      Node *dbg_legal = LCA;
+      while(!dbg_legal->is_Start() && ct < 100) {
+        tty->print("idom[%d] ",ct); dbg_legal->dump(); tty->cr();
+        ct++;
+        dbg_legal = idom(dbg_legal);
+      }
+      assert(false, "Bad graph detected in build_loop_late");
+    }
+#endif
     // Find least loop nesting depth
     legal = idom(legal);        // Bump up the IDOM tree
     // Check for lower nesting depth
--- a/src/share/vm/opto/loopnode.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/loopnode.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -30,6 +30,7 @@
 class Node;
 class PhaseIdealLoop;
 class VectorSet;
+class Invariance;
 struct small_cache;
 
 //
@@ -325,6 +326,10 @@
   // Returns TRUE if loop tree is structurally changed.
   bool beautify_loops( PhaseIdealLoop *phase );
 
+  // Perform optimization to use the loop predicates for null checks and range checks.
+  // Applies to any loop level (not just the innermost one)
+  bool loop_predication( PhaseIdealLoop *phase);
+
   // Perform iteration-splitting on inner loops.  Split iterations to
   // avoid range checks or one-shot null checks.  Returns false if the
   // current round of loop opts should stop.
@@ -395,6 +400,9 @@
   // into longer memory ops, we may want to increase alignment.
   bool policy_align( PhaseIdealLoop *phase ) const;
 
+  // Return TRUE if "iff" is a range check.
+  bool is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invariance& invar) const;
+
   // Compute loop trip count from profile data
   void compute_profile_trip_cnt( PhaseIdealLoop *phase );
 
@@ -521,9 +529,6 @@
   }
   Node *dom_lca_for_get_late_ctrl_internal( Node *lca, Node *n, Node *tag );
 
-  // true if CFG node d dominates CFG node n
-  bool is_dominator(Node *d, Node *n);
-
   // Helper function for directing control inputs away from CFG split
   // points.
   Node *find_non_split_ctrl( Node *ctrl ) const {
@@ -572,6 +577,17 @@
     assert(n == find_non_split_ctrl(n), "must return legal ctrl" );
     return n;
   }
+  // true if CFG node d dominates CFG node n
+  bool is_dominator(Node *d, Node *n);
+  // return get_ctrl for a data node and self(n) for a CFG node
+  Node* ctrl_or_self(Node* n) {
+    if (has_ctrl(n))
+      return get_ctrl(n);
+    else {
+      assert (n->is_CFG(), "must be a CFG node");
+      return n;
+    }
+  }
 
 private:
   Node *get_ctrl_no_update( Node *i ) const {
@@ -600,7 +616,7 @@
   // Lazy-dazy update of 'get_ctrl' and 'idom_at' mechanisms.  Replace
   // the 'old_node' with 'new_node'.  Kill old-node.  Add a reference
   // from old_node to new_node to support the lazy update.  Reference
-  // replaces loop reference, since that is not neede for dead node.
+  // replaces loop reference, since that is not needed for dead node.
 public:
   void lazy_update( Node *old_node, Node *new_node ) {
     assert( old_node != new_node, "no cycles please" );
@@ -679,11 +695,11 @@
     _dom_lca_tags(C->comp_arena()),
     _verify_me(NULL),
     _verify_only(true) {
-    build_and_optimize(false);
+    build_and_optimize(false, false);
   }
 
   // build the loop tree and perform any requested optimizations
-  void build_and_optimize(bool do_split_if);
+  void build_and_optimize(bool do_split_if, bool do_loop_pred);
 
 public:
   // Dominators for the sea of nodes
@@ -694,13 +710,13 @@
   Node *dom_lca_internal( Node *n1, Node *n2 ) const;
 
   // Compute the Ideal Node to Loop mapping
-  PhaseIdealLoop( PhaseIterGVN &igvn, bool do_split_ifs) :
+  PhaseIdealLoop( PhaseIterGVN &igvn, bool do_split_ifs, bool do_loop_pred) :
     PhaseTransform(Ideal_Loop),
     _igvn(igvn),
     _dom_lca_tags(C->comp_arena()),
     _verify_me(NULL),
     _verify_only(false) {
-    build_and_optimize(do_split_ifs);
+    build_and_optimize(do_split_ifs, do_loop_pred);
   }
 
   // Verify that verify_me made the same decisions as a fresh run.
@@ -710,7 +726,7 @@
     _dom_lca_tags(C->comp_arena()),
     _verify_me(verify_me),
     _verify_only(false) {
-    build_and_optimize(false);
+    build_and_optimize(false, false);
   }
 
   // Build and verify the loop tree without modifying the graph.  This
@@ -790,6 +806,30 @@
   // Return true if exp is a scaled induction var plus (or minus) constant
   bool is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth = 0);
 
+  // Return true if proj is for "proj->[region->..]call_uct"
+  bool is_uncommon_trap_proj(ProjNode* proj, bool must_reason_predicate = false);
+  // Return true for    "if(test)-> proj -> ...
+  //                          |
+  //                          V
+  //                      other_proj->[region->..]call_uct"
+  bool is_uncommon_trap_if_pattern(ProjNode* proj, bool must_reason_predicate = false);
+  // Create a new if above the uncommon_trap_if_pattern for the predicate to be promoted
+  ProjNode* create_new_if_for_predicate(ProjNode* cont_proj);
+  // Find a good location to insert a predicate
+  ProjNode* find_predicate_insertion_point(Node* start_c);
+  // Construct a range check for a predicate if
+  BoolNode* rc_predicate(Node* ctrl,
+                         int scale, Node* offset,
+                         Node* init, Node* limit, Node* stride,
+                         Node* range);
+
+  // Implementation of the loop predication to promote checks outside the loop
+  bool loop_predication_impl(IdealLoopTree *loop);
+
+  // Helper function to collect predicate for eliminating the useless ones
+  void collect_potentially_useful_predicates(IdealLoopTree *loop, Unique_Node_List &predicate_opaque1);
+  void eliminate_useless_predicates();
+
   // Eliminate range-checks and other trip-counter vs loop-invariant tests.
   void do_range_check( IdealLoopTree *loop, Node_List &old_new );
 
@@ -906,7 +946,6 @@
   const TypeInt* filtered_type_from_dominators( Node* val, Node *val_ctrl);
 
   // Helper functions
-  void register_new_node( Node *n, Node *blk );
   Node *spinup( Node *iff, Node *new_false, Node *new_true, Node *region, Node *phi, small_cache *cache );
   Node *find_use_block( Node *use, Node *def, Node *old_false, Node *new_false, Node *old_true, Node *new_true );
   void handle_use( Node *use, Node *def, small_cache *cache, Node *region_dom, Node *new_false, Node *new_true, Node *old_false, Node *old_true );
@@ -918,6 +957,7 @@
 public:
   void set_created_loop_node() { _created_loop_node = true; }
   bool created_loop_node()     { return _created_loop_node; }
+  void register_new_node( Node *n, Node *blk );
 
 #ifndef PRODUCT
   void dump( ) const;
--- a/src/share/vm/opto/loopopts.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/loopopts.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,7 +47,7 @@
     int offset = t_oop->offset();
     phi = new (C,region->req()) PhiNode(region, type, NULL, iid, index, offset);
   } else {
-    phi = new (C,region->req()) PhiNode(region, type);
+    phi = PhiNode::make_blank(region, n);
   }
   uint old_unique = C->unique();
   for( uint i = 1; i < region->req(); i++ ) {
--- a/src/share/vm/opto/machnode.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/machnode.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -636,7 +636,9 @@
 }
 #ifndef PRODUCT
 void MachCallJavaNode::dump_spec(outputStream *st) const {
-  if( _method ) {
+  if (_method_handle_invoke)
+    st->print("MethodHandle ");
+  if (_method) {
     _method->print_short_name(st);
     st->print(" ");
   }
@@ -644,6 +646,20 @@
 }
 #endif
 
+//------------------------------Registers--------------------------------------
+const RegMask &MachCallJavaNode::in_RegMask(uint idx) const {
+  // Values in the domain use the user's calling convention, embodied in the
+  // _in_rms array of RegMasks.
+  if (idx < tf()->domain()->cnt())  return _in_rms[idx];
+  // Values outside the domain represent debug info
+  Matcher* m = Compile::current()->matcher();
+  // If this call is a MethodHandle invoke we have to use a different
+  // debugmask which does not include the register we use to save the
+  // SP over MH invokes.
+  RegMask** debugmask = _method_handle_invoke ? m->idealreg2mhdebugmask : m->idealreg2debugmask;
+  return *debugmask[in(idx)->ideal_reg()];
+}
+
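The mask selection above has a simple shape: values inside the call's signature use the per-call register masks, while debug-info inputs index one of two per-ideal-register tables, chosen by whether the call is a MethodHandle invoke. Schematically (toy types standing in for RegMask and the Matcher tables):

    // Toy version of the debug-mask table selection in in_RegMask above.
    struct ToyRegMask { unsigned long long bits; };

    const ToyRegMask& pick_debug_mask(const ToyRegMask* const* normal_table,
                                      const ToyRegMask* const* mh_table,
                                      bool method_handle_invoke,
                                      int ideal_reg) {
      const ToyRegMask* const* table =
          method_handle_invoke ? mh_table : normal_table;
      return *table[ideal_reg];
    }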
 //=============================================================================
 uint MachCallStaticJavaNode::size_of() const { return sizeof(*this); }
 uint MachCallStaticJavaNode::cmp( const Node &n ) const {
--- a/src/share/vm/opto/machnode.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/machnode.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -232,7 +232,7 @@
   // Expand method for MachNode, replaces nodes representing pseudo
   // instructions with a set of nodes which represent real machine
   // instructions and compute the same value.
-  virtual MachNode *Expand( State *, Node_List &proj_list ) { return this; }
+  virtual MachNode *Expand( State *, Node_List &proj_list, Node* mem ) { return this; }
 
   // Bottom_type call; value comes from operand0
   virtual const class Type *bottom_type() const { return _opnds[0]->type(); }
@@ -662,9 +662,13 @@
   ciMethod* _method;             // Method being direct called
   int        _bci;               // Byte Code index of call byte code
   bool       _optimized_virtual; // Tells if node is a static call or an optimized virtual
+  bool       _method_handle_invoke;   // Tells if the call has to preserve SP
   MachCallJavaNode() : MachCallNode() {
     init_class_id(Class_MachCallJava);
   }
+
+  virtual const RegMask &in_RegMask(uint) const;
+
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const;
 #endif
--- a/src/share/vm/opto/macro.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/macro.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -316,6 +316,21 @@
         assert(adr_idx == Compile::AliasIdxRaw, "address must match or be raw");
       }
       mem = mem->in(MemNode::Memory);
+    } else if (mem->is_ClearArray()) {
+      if (!ClearArrayNode::step_through(&mem, alloc->_idx, phase)) {
+        // Can not bypass initialization of the instance
+        // we are looking for.
+        debug_only(intptr_t offset;)
+        assert(alloc == AllocateNode::Ideal_allocation(mem->in(3), phase, offset), "sanity");
+        InitializeNode* init = alloc->as_Allocate()->initialization();
+        // We are looking for the stored value: return the Initialize node
+        // or the memory edge from the Allocate node.
+        if (init != NULL)
+          return init;
+        else
+          return alloc->in(TypeFunc::Memory); // It will produce a zero value (see callers).
+      }
+      // Otherwise skip it (the call updated 'mem' value).
     } else if (mem->Opcode() == Op_SCMemProj) {
       assert(mem->in(0)->is_LoadStore(), "sanity");
       const TypePtr* atype = mem->in(0)->in(MemNode::Address)->bottom_type()->is_ptr();
@@ -823,6 +838,18 @@
           Node *n = use->last_out(k);
           uint oc2 = use->outcnt();
           if (n->is_Store()) {
+#ifdef ASSERT
+            // Verify that there are no dependent MemBarVolatile nodes;
+            // they should have been removed during IGVN, see MemBarNode::Ideal().
+            for (DUIterator_Fast pmax, p = n->fast_outs(pmax);
+                                       p < pmax; p++) {
+              Node* mb = n->fast_out(p);
+              assert(mb->is_Initialize() || !mb->is_MemBar() ||
+                     mb->req() <= MemBarNode::Precedent ||
+                     mb->in(MemBarNode::Precedent) != n,
+                     "MemBarVolatile should be eliminated for non-escaping object");
+            }
+#endif
             _igvn.replace_node(n, n->in(MemNode::Memory));
           } else {
             eliminate_card_mark(n);
@@ -912,15 +939,29 @@
     return false;
   }
 
+  CompileLog* log = C->log();
+  if (log != NULL) {
+    Node* klass = alloc->in(AllocateNode::KlassNode);
+    const TypeKlassPtr* tklass = _igvn.type(klass)->is_klassptr();
+    log->head("eliminate_allocation type='%d'",
+              log->identify(tklass->klass()));
+    JVMState* p = alloc->jvms();
+    while (p != NULL) {
+      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
+      p = p->caller();
+    }
+    log->tail("eliminate_allocation");
+  }
+
   process_users_of_allocation(alloc);
 
 #ifndef PRODUCT
-if (PrintEliminateAllocations) {
-  if (alloc->is_AllocateArray())
-    tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
-  else
-    tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
-}
+  if (PrintEliminateAllocations) {
+    if (alloc->is_AllocateArray())
+      tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
+    else
+      tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
+  }
 #endif
 
   return true;
@@ -1639,6 +1680,18 @@
       } // if (!oldbox->is_eliminated())
   } // if (alock->is_Lock() && !lock->is_coarsened())
 
+  CompileLog* log = C->log();
+  if (log != NULL) {
+    log->head("eliminate_lock lock='%d'",
+              alock->is_Lock());
+    JVMState* p = alock->jvms();
+    while (p != NULL) {
+      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
+      p = p->caller();
+    }
+    log->tail("eliminate_lock");
+  }
+
   #ifndef PRODUCT
   if (PrintEliminateLocks) {
     if (alock->is_Lock()) {
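
For reference, CompileLog's head()/elem()/tail() emit nested XML into the compilation log, so an eliminated allocation under one level of inlining shows up roughly as below (the numeric ids are hypothetical handles produced by log->identify()):

    <eliminate_allocation type='527'>
      <jvms bci='11' method='530'/>
      <jvms bci='3' method='529'/>
    </eliminate_allocation>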
--- a/src/share/vm/opto/matcher.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/matcher.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -70,19 +70,27 @@
   _dontcare(&_states_arena) {
   C->set_matcher(this);
 
-  idealreg2spillmask[Op_RegI] = NULL;
-  idealreg2spillmask[Op_RegN] = NULL;
-  idealreg2spillmask[Op_RegL] = NULL;
-  idealreg2spillmask[Op_RegF] = NULL;
-  idealreg2spillmask[Op_RegD] = NULL;
-  idealreg2spillmask[Op_RegP] = NULL;
+  idealreg2spillmask  [Op_RegI] = NULL;
+  idealreg2spillmask  [Op_RegN] = NULL;
+  idealreg2spillmask  [Op_RegL] = NULL;
+  idealreg2spillmask  [Op_RegF] = NULL;
+  idealreg2spillmask  [Op_RegD] = NULL;
+  idealreg2spillmask  [Op_RegP] = NULL;
 
-  idealreg2debugmask[Op_RegI] = NULL;
-  idealreg2debugmask[Op_RegN] = NULL;
-  idealreg2debugmask[Op_RegL] = NULL;
-  idealreg2debugmask[Op_RegF] = NULL;
-  idealreg2debugmask[Op_RegD] = NULL;
-  idealreg2debugmask[Op_RegP] = NULL;
+  idealreg2debugmask  [Op_RegI] = NULL;
+  idealreg2debugmask  [Op_RegN] = NULL;
+  idealreg2debugmask  [Op_RegL] = NULL;
+  idealreg2debugmask  [Op_RegF] = NULL;
+  idealreg2debugmask  [Op_RegD] = NULL;
+  idealreg2debugmask  [Op_RegP] = NULL;
+
+  idealreg2mhdebugmask[Op_RegI] = NULL;
+  idealreg2mhdebugmask[Op_RegN] = NULL;
+  idealreg2mhdebugmask[Op_RegL] = NULL;
+  idealreg2mhdebugmask[Op_RegF] = NULL;
+  idealreg2mhdebugmask[Op_RegD] = NULL;
+  idealreg2mhdebugmask[Op_RegP] = NULL;
+
   debug_only(_mem_node = NULL;)   // Ideal memory node consumed by mach node
 }
 
@@ -389,19 +397,28 @@
 void Matcher::init_first_stack_mask() {
 
   // Allocate storage for spill masks as masks for the appropriate load type.
-  RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask)*12);
-  idealreg2spillmask[Op_RegN] = &rms[0];
-  idealreg2spillmask[Op_RegI] = &rms[1];
-  idealreg2spillmask[Op_RegL] = &rms[2];
-  idealreg2spillmask[Op_RegF] = &rms[3];
-  idealreg2spillmask[Op_RegD] = &rms[4];
-  idealreg2spillmask[Op_RegP] = &rms[5];
-  idealreg2debugmask[Op_RegN] = &rms[6];
-  idealreg2debugmask[Op_RegI] = &rms[7];
-  idealreg2debugmask[Op_RegL] = &rms[8];
-  idealreg2debugmask[Op_RegF] = &rms[9];
-  idealreg2debugmask[Op_RegD] = &rms[10];
-  idealreg2debugmask[Op_RegP] = &rms[11];
+  RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask) * 3*6);
+
+  idealreg2spillmask  [Op_RegN] = &rms[0];
+  idealreg2spillmask  [Op_RegI] = &rms[1];
+  idealreg2spillmask  [Op_RegL] = &rms[2];
+  idealreg2spillmask  [Op_RegF] = &rms[3];
+  idealreg2spillmask  [Op_RegD] = &rms[4];
+  idealreg2spillmask  [Op_RegP] = &rms[5];
+
+  idealreg2debugmask  [Op_RegN] = &rms[6];
+  idealreg2debugmask  [Op_RegI] = &rms[7];
+  idealreg2debugmask  [Op_RegL] = &rms[8];
+  idealreg2debugmask  [Op_RegF] = &rms[9];
+  idealreg2debugmask  [Op_RegD] = &rms[10];
+  idealreg2debugmask  [Op_RegP] = &rms[11];
+
+  idealreg2mhdebugmask[Op_RegN] = &rms[12];
+  idealreg2mhdebugmask[Op_RegI] = &rms[13];
+  idealreg2mhdebugmask[Op_RegL] = &rms[14];
+  idealreg2mhdebugmask[Op_RegF] = &rms[15];
+  idealreg2mhdebugmask[Op_RegD] = &rms[16];
+  idealreg2mhdebugmask[Op_RegP] = &rms[17];
 
   OptoReg::Name i;
 
@@ -442,12 +459,19 @@
   // Make up debug masks.  Any spill slot plus callee-save registers.
   // Caller-save registers are assumed to be trashable by the various
   // inline-cache fixup routines.
-  *idealreg2debugmask[Op_RegN]= *idealreg2spillmask[Op_RegN];
-  *idealreg2debugmask[Op_RegI]= *idealreg2spillmask[Op_RegI];
-  *idealreg2debugmask[Op_RegL]= *idealreg2spillmask[Op_RegL];
-  *idealreg2debugmask[Op_RegF]= *idealreg2spillmask[Op_RegF];
-  *idealreg2debugmask[Op_RegD]= *idealreg2spillmask[Op_RegD];
-  *idealreg2debugmask[Op_RegP]= *idealreg2spillmask[Op_RegP];
+  *idealreg2debugmask  [Op_RegN]= *idealreg2spillmask[Op_RegN];
+  *idealreg2debugmask  [Op_RegI]= *idealreg2spillmask[Op_RegI];
+  *idealreg2debugmask  [Op_RegL]= *idealreg2spillmask[Op_RegL];
+  *idealreg2debugmask  [Op_RegF]= *idealreg2spillmask[Op_RegF];
+  *idealreg2debugmask  [Op_RegD]= *idealreg2spillmask[Op_RegD];
+  *idealreg2debugmask  [Op_RegP]= *idealreg2spillmask[Op_RegP];
+
+  *idealreg2mhdebugmask[Op_RegN]= *idealreg2spillmask[Op_RegN];
+  *idealreg2mhdebugmask[Op_RegI]= *idealreg2spillmask[Op_RegI];
+  *idealreg2mhdebugmask[Op_RegL]= *idealreg2spillmask[Op_RegL];
+  *idealreg2mhdebugmask[Op_RegF]= *idealreg2spillmask[Op_RegF];
+  *idealreg2mhdebugmask[Op_RegD]= *idealreg2spillmask[Op_RegD];
+  *idealreg2mhdebugmask[Op_RegP]= *idealreg2spillmask[Op_RegP];
 
   // Prevent stub compilations from attempting to reference
   // callee-saved registers from debug info
@@ -458,14 +482,31 @@
     if( _register_save_policy[i] == 'C' ||
         _register_save_policy[i] == 'A' ||
         (_register_save_policy[i] == 'E' && exclude_soe) ) {
-      idealreg2debugmask[Op_RegN]->Remove(i);
-      idealreg2debugmask[Op_RegI]->Remove(i); // Exclude save-on-call
-      idealreg2debugmask[Op_RegL]->Remove(i); // registers from debug
-      idealreg2debugmask[Op_RegF]->Remove(i); // masks
-      idealreg2debugmask[Op_RegD]->Remove(i);
-      idealreg2debugmask[Op_RegP]->Remove(i);
+      idealreg2debugmask  [Op_RegN]->Remove(i);
+      idealreg2debugmask  [Op_RegI]->Remove(i); // Exclude save-on-call
+      idealreg2debugmask  [Op_RegL]->Remove(i); // registers from debug
+      idealreg2debugmask  [Op_RegF]->Remove(i); // masks
+      idealreg2debugmask  [Op_RegD]->Remove(i);
+      idealreg2debugmask  [Op_RegP]->Remove(i);
+
+      idealreg2mhdebugmask[Op_RegN]->Remove(i);
+      idealreg2mhdebugmask[Op_RegI]->Remove(i);
+      idealreg2mhdebugmask[Op_RegL]->Remove(i);
+      idealreg2mhdebugmask[Op_RegF]->Remove(i);
+      idealreg2mhdebugmask[Op_RegD]->Remove(i);
+      idealreg2mhdebugmask[Op_RegP]->Remove(i);
     }
   }
+
+  // Subtract the register we use to save the SP for MethodHandle
+  // invokes from the debug mask.
+  const RegMask save_mask = method_handle_invoke_SP_save_mask();
+  idealreg2mhdebugmask[Op_RegN]->SUBTRACT(save_mask);
+  idealreg2mhdebugmask[Op_RegI]->SUBTRACT(save_mask);
+  idealreg2mhdebugmask[Op_RegL]->SUBTRACT(save_mask);
+  idealreg2mhdebugmask[Op_RegF]->SUBTRACT(save_mask);
+  idealreg2mhdebugmask[Op_RegD]->SUBTRACT(save_mask);
+  idealreg2mhdebugmask[Op_RegP]->SUBTRACT(save_mask);
 }
 
 //---------------------------is_save_on_entry----------------------------------
@@ -989,6 +1030,7 @@
   CallNode *call;
   const TypeTuple *domain;
   ciMethod*        method = NULL;
+  bool             is_method_handle_invoke = false;  // for special kill effects
   if( sfpt->is_Call() ) {
     call = sfpt->as_Call();
     domain = call->tf()->domain();
@@ -1013,6 +1055,8 @@
       mcall_java->_method = method;
       mcall_java->_bci = call_java->_bci;
       mcall_java->_optimized_virtual = call_java->is_optimized_virtual();
+      is_method_handle_invoke = call_java->is_method_handle_invoke();
+      mcall_java->_method_handle_invoke = is_method_handle_invoke;
       if( mcall_java->is_MachCallStaticJava() )
         mcall_java->as_MachCallStaticJava()->_name =
          call_java->as_CallStaticJava()->_name;
@@ -1126,6 +1170,15 @@
     mcall->_argsize = out_arg_limit_per_call - begin_out_arg_area;
   }
 
+  if (is_method_handle_invoke) {
+    // Kill some extra stack space in case method handles want to do
+    // a little in-place argument insertion.
+    int regs_per_word  = NOT_LP64(1) LP64_ONLY(2); // %%% make a global const!
+    out_arg_limit_per_call += MethodHandlePushLimit * regs_per_word;
+    // Do not update mcall->_argsize because (a) the extra space is not
+    // pushed as arguments and (b) _argsize is dead (not used anywhere).
+  }
+
   // Compute the max stack slot killed by any call.  These will not be
   // available for debug info, and will be used to adjust FIRST_STACK_mask
   // after all call sites have been visited.
@@ -1527,7 +1580,7 @@
   uint num_proj = _proj_list.size();
 
   // Perform any 1-to-many expansions required
-  MachNode *ex = mach->Expand(s,_proj_list);
+  MachNode *ex = mach->Expand(s,_proj_list, mem);
   if( ex != mach ) {
     assert(ex->ideal_reg() == mach->ideal_reg(), "ideal types should match");
     if( ex->in(1)->is_Con() )
@@ -1832,67 +1885,23 @@
       case Op_Binary:         // These are introduced in the Post_Visit state.
         ShouldNotReachHere();
         break;
-      case Op_StoreB:         // Do match these, despite no ideal reg
-      case Op_StoreC:
-      case Op_StoreCM:
-      case Op_StoreD:
-      case Op_StoreF:
-      case Op_StoreI:
-      case Op_StoreL:
-      case Op_StoreP:
-      case Op_StoreN:
-      case Op_Store16B:
-      case Op_Store8B:
-      case Op_Store4B:
-      case Op_Store8C:
-      case Op_Store4C:
-      case Op_Store2C:
-      case Op_Store4I:
-      case Op_Store2I:
-      case Op_Store2L:
-      case Op_Store4F:
-      case Op_Store2F:
-      case Op_Store2D:
       case Op_ClearArray:
       case Op_SafePoint:
         mem_op = true;
         break;
-      case Op_LoadB:
-      case Op_LoadUS:
-      case Op_LoadD:
-      case Op_LoadF:
-      case Op_LoadI:
-      case Op_LoadKlass:
-      case Op_LoadNKlass:
-      case Op_LoadL:
-      case Op_LoadS:
-      case Op_LoadP:
-      case Op_LoadN:
-      case Op_LoadRange:
-      case Op_LoadD_unaligned:
-      case Op_LoadL_unaligned:
-      case Op_Load16B:
-      case Op_Load8B:
-      case Op_Load4B:
-      case Op_Load4C:
-      case Op_Load2C:
-      case Op_Load8C:
-      case Op_Load8S:
-      case Op_Load4S:
-      case Op_Load2S:
-      case Op_Load4I:
-      case Op_Load2I:
-      case Op_Load2L:
-      case Op_Load4F:
-      case Op_Load2F:
-      case Op_Load2D:
-        mem_op = true;
-        // Must be root of match tree due to prior load conflict
-        if( C->subsume_loads() == false ) {
-          set_shared(n);
+      default:
+        if( n->is_Store() ) {
+          // Do match stores, despite no ideal reg
+          mem_op = true;
+          break;
+        }
+        if( n->is_Mem() ) { // Loads and LoadStores
+          mem_op = true;
+          // Loads must be root of match tree due to prior load conflict
+          if( C->subsume_loads() == false )
+            set_shared(n);
         }
         // Fall into default case
-      default:
         if( !n->ideal_reg() )
           set_dontcare(n);  // Unmatchable Nodes
       } // end_switch
@@ -1913,15 +1922,15 @@
           continue; // for(int i = ...)
         }
 
-        // Clone addressing expressions as they are "free" in most instructions
+        if( mop == Op_AddP && m->in(AddPNode::Base)->Opcode() == Op_DecodeN ) {
+          // Bases used in addresses must be shared but since
+          // they are shared through a DecodeN they may appear
+          // to have a single use so force sharing here.
+          set_shared(m->in(AddPNode::Base)->in(1));
+        }
+
+        // Clone addressing expressions as they are "free" in memory access instructions
         if( mem_op && i == MemNode::Address && mop == Op_AddP ) {
-          if (m->in(AddPNode::Base)->Opcode() == Op_DecodeN) {
-            // Bases used in addresses must be shared but since
-            // they are shared through a DecodeN they may appear
-            // to have a single use so force sharing here.
-            set_shared(m->in(AddPNode::Base)->in(1));
-          }
-
           // Some inputs for address expression are not put on stack
           // to avoid marking them as shared and forcing them into register
           // if they are used only in address expressions.
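
The hoisted DecodeN check is easiest to see against the address tree it guards (inputs per AddPNode's Base/Address/Offset slots):

    //   AddP
    //     Base    -> DecodeN -> in(1)   <- the input that gets force-shared
    //     Address -> ...
    //     Offset  -> ...
    //
    // AddP trees are cloned freely into each memory access, but the value
    // feeding the DecodeN can look single-use when every user reaches it
    // through that one DecodeN, so it is marked shared up front, before
    // any cloning decision is taken.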
--- a/src/share/vm/opto/matcher.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/matcher.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -117,8 +117,9 @@
   static const int base2reg[];        // Map Types to machine register types
   // Convert ideal machine register to a register mask for spill-loads
   static const RegMask *idealreg2regmask[];
-  RegMask *idealreg2spillmask[_last_machine_leaf];
-  RegMask *idealreg2debugmask[_last_machine_leaf];
+  RegMask *idealreg2spillmask  [_last_machine_leaf];
+  RegMask *idealreg2debugmask  [_last_machine_leaf];
+  RegMask *idealreg2mhdebugmask[_last_machine_leaf];
   void init_spill_mask( Node *ret );
   // Convert machine register number to register mask
   static uint mreg2regmask_max;
@@ -297,6 +298,8 @@
   // Register for MODL projection of divmodL
   static RegMask modL_proj_mask();
 
+  static const RegMask method_handle_invoke_SP_save_mask();
+
   // Java-Interpreter calling convention
   // (what you use when calling between compiled-Java and Interpreted-Java
 
--- a/src/share/vm/opto/memnode.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/memnode.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -123,6 +123,13 @@
       } else {
         assert(false, "unexpected projection");
       }
+    } else if (result->is_ClearArray()) {
+      if (!ClearArrayNode::step_through(&result, instance_id, phase)) {
+        // Can not bypass initialization of the instance
+        // we are looking for.
+        break;
+      }
+      // Otherwise skip it (the call updated 'result' value).
     } else if (result->is_MergeMem()) {
       result = step_through_mergemem(phase, result->as_MergeMem(), t_adr, NULL, tty);
     }
@@ -255,6 +262,13 @@
     return NodeSentinel; // caller will return NULL
   }
 
+  // Do NOT remove or optimize the next lines: ensure a new alias index
+  // is allocated for an oop pointer type before Escape Analysis.
+  // Note: C++ will not remove it since the call has a side effect.
+  if ( t_adr->isa_oopptr() ) {
+    int alias_idx = phase->C->get_alias_index(t_adr->is_ptr());
+  }
+
 #ifdef ASSERT
   Node* base = NULL;
   if (address->is_AddP())
@@ -530,6 +544,15 @@
       } else if (mem->is_Proj() && mem->in(0)->is_MemBar()) {
         mem = mem->in(0)->in(TypeFunc::Memory);
         continue;           // (a) advance through independent MemBar memory
+      } else if (mem->is_ClearArray()) {
+        if (ClearArrayNode::step_through(&mem, (uint)addr_t->instance_id(), phase)) {
+          // (the call updated 'mem' value)
+          continue;         // (a) advance through independent allocation memory
+        } else {
+          // Can not bypass initialization of the instance
+          // we are looking for.
+          return mem;
+        }
       } else if (mem->is_MergeMem()) {
         int alias_idx = phase->C->get_alias_index(adr_type());
         mem = mem->as_MergeMem()->memory_at(alias_idx);
@@ -1496,6 +1519,8 @@
       }
     }
   } else if (tp->base() == Type::InstPtr) {
+    const TypeInstPtr* tinst = tp->is_instptr();
+    ciKlass* klass = tinst->klass();
     assert( off != Type::OffsetBot ||
             // arrays can be cast to Objects
             tp->is_oopptr()->klass()->is_java_lang_Object() ||
@@ -1503,6 +1528,25 @@
             phase->C->has_unsafe_access(),
             "Field accesses must be precise" );
     // For oop loads, we expect the _type to be precise
+    if (OptimizeStringConcat && klass == phase->C->env()->String_klass() &&
+        adr->is_AddP() && off != Type::OffsetBot) {
+      // For constant Strings treat the fields as compile time constants.
+      Node* base = adr->in(AddPNode::Base);
+      if (base->Opcode() == Op_ConP) {
+        const TypeOopPtr* t = phase->type(base)->isa_oopptr();
+        ciObject* string = t->const_oop();
+        ciConstant constant = string->as_instance()->field_value_by_offset(off);
+        if (constant.basic_type() == T_INT) {
+          return TypeInt::make(constant.as_int());
+        } else if (constant.basic_type() == T_ARRAY) {
+          if (adr->bottom_type()->is_ptr_to_narrowoop()) {
+            return TypeNarrowOop::make_from_constant(constant.as_object());
+          } else {
+            return TypeOopPtr::make_from_constant(constant.as_object());
+          }
+        }
+      }
+    }
   } else if (tp->base() == Type::KlassPtr) {
     assert( off != Type::OffsetBot ||
             // arrays can be cast to Objects
@@ -2426,6 +2470,31 @@
   return mem;
 }
 
+//----------------------------step_through----------------------------------
+// Advance *np past the ClearArray to the allocation's input memory edge if it
+// is a different instance; return false if it is the one we are looking for.
+bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseTransform* phase) {
+  Node* n = *np;
+  assert(n->is_ClearArray(), "sanity");
+  intptr_t offset;
+  AllocateNode* alloc = AllocateNode::Ideal_allocation(n->in(3), phase, offset);
+  // This method is called only before Allocate nodes are expanded during
+  // macro nodes expansion. Before that ClearArray nodes are only generated
+  // in LibraryCallKit::generate_arraycopy() which follows allocations.
+  assert(alloc != NULL, "should have allocation");
+  if (alloc->_idx == instance_id) {
+    // Can not bypass initialization of the instance we are looking for.
+    return false;
+  }
+  // Otherwise skip it.
+  InitializeNode* init = alloc->initialization();
+  if (init != NULL)
+    *np = init->in(TypeFunc::Memory);
+  else
+    *np = alloc->in(TypeFunc::Memory);
+  return true;
+}
+
 //----------------------------clear_memory-------------------------------------
 // Generate code to initialize object storage to zero.
 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
@@ -2599,7 +2668,30 @@
 // Return a node which is more "ideal" than the current node.  Strip out
 // control copies
 Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
-  return remove_dead_region(phase, can_reshape) ? this : NULL;
+  if (remove_dead_region(phase, can_reshape)) return this;
+
+  // Eliminate volatile MemBars for scalar replaced objects.
+  if (can_reshape && req() == (Precedent+1) &&
+      (Opcode() == Op_MemBarAcquire || Opcode() == Op_MemBarVolatile)) {
+    // Volatile field loads and stores.
+    Node* my_mem = in(MemBarNode::Precedent);
+    if (my_mem != NULL && my_mem->is_Mem()) {
+      const TypeOopPtr* t_oop = my_mem->in(MemNode::Address)->bottom_type()->isa_oopptr();
+      // Check for scalar replaced object reference.
+      if( t_oop != NULL && t_oop->is_known_instance_field() &&
+          t_oop->offset() != Type::OffsetBot &&
+          t_oop->offset() != Type::OffsetTop) {
+        // Replace MemBar projections by its inputs.
+        PhaseIterGVN* igvn = phase->is_IterGVN();
+        igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory));
+        igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control));
+        // Must return either the original node (now dead) or a new node
+        // (Do not return a top here, since that would break the uniqueness of top.)
+        return new (phase->C, 1) ConINode(TypeInt::ZERO);
+      }
+    }
+  }
+  return NULL;
 }
 
 //------------------------------Value------------------------------------------
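
The new case in MemBarNode::Ideal() drops acquire/volatile barriers whose only job was ordering accesses to an instance that escape analysis proved non-escaping; the rewrite in sketch form:

    // Before: barrier tied to a volatile access of a known-instance field
    //   MemBarVolatile (Precedent -> that Store/Load)
    //     |- Proj(Memory)  -> users
    //     |- Proj(Control) -> users
    //
    // After: each projection is rewired to the MemBar's corresponding
    // input, and a throw-away ConI(0) is returned so IGVN sees progress
    // without the node returning top (top must stay unique).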
--- a/src/share/vm/opto/memnode.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/memnode.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -583,9 +583,22 @@
 // Preceding equivalent StoreCMs may be eliminated.
 class StoreCMNode : public StoreNode {
  private:
+  virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; }
+  virtual uint cmp( const Node &n ) const {
+    return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx
+      && StoreNode::cmp(n);
+  }
+  virtual uint size_of() const { return sizeof(*this); }
   int _oop_alias_idx;   // The alias_idx of OopStore
+
 public:
-  StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) : StoreNode(c,mem,adr,at,val,oop_store), _oop_alias_idx(oop_alias_idx) {}
+  StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
+    StoreNode(c,mem,adr,at,val,oop_store),
+    _oop_alias_idx(oop_alias_idx) {
+    assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
+           _oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0,
+           "bad oop alias idx");
+  }
   virtual int Opcode() const;
   virtual Node *Identity( PhaseTransform *phase );
   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
@@ -717,7 +730,10 @@
 //------------------------------ClearArray-------------------------------------
 class ClearArrayNode: public Node {
 public:
-  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base ) : Node(ctrl,arymem,word_cnt,base) {}
+  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base )
+    : Node(ctrl,arymem,word_cnt,base) {
+    init_class_id(Class_ClearArray);
+  }
   virtual int         Opcode() const;
   virtual const Type *bottom_type() const { return Type::MEMORY; }
   // ClearArray modifies array elements, and so affects only the
@@ -743,6 +759,9 @@
                             Node* start_offset,
                             Node* end_offset,
                             PhaseGVN* phase);
+  // Advance *np past the ClearArray to the allocation's input memory edge if
+  // it is a different instance; return false if it is the one we are looking for.
+  static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
 };
 
 //------------------------------StrComp-------------------------------------
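
A caller-side sketch of the new step_through(), mirroring the three call sites patched above (the surrounding walk loop is assumed, not literal code):

    Node* mem = start;                  // current point in a memory-graph walk
    while (mem != NULL) {
      if (mem->is_ClearArray()) {
        if (!ClearArrayNode::step_through(&mem, instance_id, phase))
          break;      // this ClearArray initializes our instance: stop here
        continue;     // 'mem' now points past the ClearArray
      }
      // ... handle the other memory node kinds, advancing 'mem' ...
    }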
--- a/src/share/vm/opto/node.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/node.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -47,6 +47,7 @@
 class CatchNode;
 class CatchProjNode;
 class CheckCastPPNode;
+class ClearArrayNode;
 class CmpNode;
 class CodeBuffer;
 class ConstraintCastNode;
@@ -599,8 +600,9 @@
     DEFINE_CLASS_ID(BoxLock,  Node, 10)
     DEFINE_CLASS_ID(Add,      Node, 11)
     DEFINE_CLASS_ID(Mul,      Node, 12)
+    DEFINE_CLASS_ID(ClearArray, Node, 13)
 
-    _max_classes  = ClassMask_Mul
+    _max_classes  = ClassMask_ClearArray
   };
   #undef DEFINE_CLASS_ID
 
@@ -661,18 +663,25 @@
     return (_flags & Flag_is_Call) != 0;
   }
 
+  CallNode* isa_Call() const {
+    return is_Call() ? as_Call() : NULL;
+  }
+
   CallNode *as_Call() const { // Only for CallNode (not for MachCallNode)
     assert((_class_id & ClassMask_Call) == Class_Call, "invalid node class");
     return (CallNode*)this;
   }
 
-  #define DEFINE_CLASS_QUERY(type) \
-  bool is_##type() const { \
+  #define DEFINE_CLASS_QUERY(type)                           \
+  bool is_##type() const {                                   \
     return ((_class_id & ClassMask_##type) == Class_##type); \
-  } \
-  type##Node *as_##type() const { \
-    assert(is_##type(), "invalid node class"); \
-    return (type##Node*)this; \
+  }                                                          \
+  type##Node *as_##type() const {                            \
+    assert(is_##type(), "invalid node class");               \
+    return (type##Node*)this;                                \
+  }                                                          \
+  type##Node* isa_##type() const {                           \
+    return (is_##type()) ? as_##type() : NULL;               \
   }
 
   DEFINE_CLASS_QUERY(AbstractLock)
@@ -691,6 +700,7 @@
   DEFINE_CLASS_QUERY(CatchProj)
   DEFINE_CLASS_QUERY(CheckCastPP)
   DEFINE_CLASS_QUERY(ConstraintCast)
+  DEFINE_CLASS_QUERY(ClearArray)
   DEFINE_CLASS_QUERY(CMove)
   DEFINE_CLASS_QUERY(Cmp)
   DEFINE_CLASS_QUERY(CountedLoop)
@@ -1249,6 +1259,24 @@
 #undef I_VDUI_ONLY
 #undef VDUI_ONLY
 
+// An Iterator that truly follows the iterator pattern.  Doesn't
+// support deletion but could be made to.
+//
+//   for (SimpleDUIterator i(n); i.has_next(); i.next()) {
+//     Node* m = i.get();
+//   }
+//
+class SimpleDUIterator : public StackObj {
+ private:
+  Node* node;
+  DUIterator_Fast i;
+  DUIterator_Fast imax;
+ public:
+  SimpleDUIterator(Node* n): node(n), i(n->fast_outs(imax)) {}
+  bool has_next() { return i < imax; }
+  void next() { i++; }
+  Node* get() { return node->fast_out(i); }
+};
+
 
 //-----------------------------------------------------------------------------
 // Map dense integer indices to Nodes.  Uses classic doubling-array trick.
@@ -1290,6 +1318,12 @@
 public:
   Node_List() : Node_Array(Thread::current()->resource_area()), _cnt(0) {}
   Node_List(Arena *a) : Node_Array(a), _cnt(0) {}
+  bool contains(Node* n) {
+    for (uint e = 0; e < size(); e++) {
+      if (at(e) == n) return true;
+    }
+    return false;
+  }
   void insert( uint i, Node *n ) { Node_Array::insert(i,n); _cnt++; }
   void remove( uint i ) { Node_Array::remove(i); _cnt--; }
   void push( Node *b ) { map(_cnt++,b); }
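
DEFINE_CLASS_QUERY now also generates a NULL-checked downcast, isa_<type>(), alongside the is_/as_ pair; the (currently disabled) merge_add() code in stringopts.cpp below uses it like this:

    CallStaticJavaNode* use = i.get()->isa_CallStaticJava();
    if (use != NULL && use->method() != NULL) {
      // reached only when the node really is a CallStaticJavaNode; no
      // separate is_CallStaticJava() test or as_CallStaticJava() cast needed
    }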
--- a/src/share/vm/opto/output.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/output.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -794,6 +794,8 @@
 #endif
 
   int safepoint_pc_offset = current_offset;
+  bool is_method_handle_invoke = false;
+  bool return_oop = false;
 
   // Add the safepoint in the DebugInfoRecorder
   if( !mach->is_MachCall() ) {
@@ -801,6 +803,20 @@
     debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
   } else {
     mcall = mach->as_MachCall();
+
+    // Is the call a MethodHandle call?
+    if (mcall->is_MachCallJava()) {
+      if (mcall->as_MachCallJava()->_method_handle_invoke) {
+        assert(has_method_handle_invokes(), "must have been set during call generation");
+        is_method_handle_invoke = true;
+      }
+    }
+
+    // Check if a call returns an object.
+    if (mcall->return_value_is_used() &&
+        mcall->tf()->range()->field_at(TypeFunc::Parms)->isa_ptr()) {
+      return_oop = true;
+    }
     safepoint_pc_offset += mcall->ret_addr_offset();
     debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
   }
@@ -911,9 +927,9 @@
     ciMethod* scope_method = method ? method : _method;
     // Describe the scope here
     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
-    assert(!jvms->should_reexecute() || depth==max_depth, "reexecute allowed only for the youngest");
+    assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
     // Now we can describe the scope.
-    debug_info()->describe_scope(safepoint_pc_offset,scope_method,jvms->bci(),jvms->should_reexecute(),locvals,expvals,monvals);
+    debug_info()->describe_scope(safepoint_pc_offset, scope_method, jvms->bci(), jvms->should_reexecute(), is_method_handle_invoke, return_oop, locvals, expvals, monvals);
   } // End jvms loop
 
   // Mark the end of the scope set.
@@ -1080,14 +1096,26 @@
   deopt_handler_req += MAX_stubs_size; // add marginal slop for handler
   stub_req += MAX_stubs_size;   // ensure per-stub margin
   code_req += MAX_inst_size;    // ensure per-instruction margin
+
   if (StressCodeBuffers)
     code_req = const_req = stub_req = exception_handler_req = deopt_handler_req = 0x10;  // force expansion
-  int total_req = code_req + pad_req + stub_req + exception_handler_req + deopt_handler_req + const_req;
+
+  int total_req =
+    code_req +
+    pad_req +
+    stub_req +
+    exception_handler_req +
+    deopt_handler_req +              // deopt handler
+    const_req;
+
+  if (has_method_handle_invokes())
+    total_req += deopt_handler_req;  // deopt MH handler
+
   CodeBuffer* cb = code_buffer();
   cb->initialize(total_req, locs_req);
 
   // Have we run out of code space?
-  if (cb->blob() == NULL) {
+  if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
     turn_off_compiler(this);
     return;
   }
@@ -1308,7 +1336,7 @@
 
       // Verify that there is sufficient space remaining
       cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
-      if (cb->blob() == NULL) {
+      if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
         turn_off_compiler(this);
         return;
       }
@@ -1424,10 +1452,17 @@
     _code_offsets.set_value(CodeOffsets::Exceptions, emit_exception_handler(*cb));
     // Emit the deopt handler code.
     _code_offsets.set_value(CodeOffsets::Deopt, emit_deopt_handler(*cb));
+
+    // Emit the MethodHandle deopt handler code (if required).
+    if (has_method_handle_invokes()) {
+      // We can use the same code as for the normal deopt handler, we
+      // just need a different entry point address.
+      _code_offsets.set_value(CodeOffsets::DeoptMH, emit_deopt_handler(*cb));
+    }
   }
 
   // One last check for failed CodeBuffer::expand:
-  if (cb->blob() == NULL) {
+  if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
     turn_off_compiler(this);
     return;
   }
--- a/src/share/vm/opto/parse.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/parse.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -39,6 +39,7 @@
   // Always between 0.0 and 1.0.  Represents the percentage of the method's
   // total execution time used at this call site.
   const float _site_invoke_ratio;
+  const int   _site_depth_adjust;
   float compute_callee_frequency( int caller_bci ) const;
 
   GrowableArray<InlineTree*> _subtrees;
@@ -50,7 +51,8 @@
              ciMethod* callee_method,
              JVMState* caller_jvms,
              int caller_bci,
-             float site_invoke_ratio);
+             float site_invoke_ratio,
+             int site_depth_adjust);
   InlineTree *build_inline_tree_for_callee(ciMethod* callee_method,
                                            JVMState* caller_jvms,
                                            int caller_bci);
@@ -61,14 +63,15 @@
 
   InlineTree *caller_tree()       const { return _caller_tree;  }
   InlineTree* callee_at(int bci, ciMethod* m) const;
-  int         inline_depth()      const { return _caller_jvms ? _caller_jvms->depth() : 0; }
+  int         inline_depth()      const { return stack_depth() + _site_depth_adjust; }
+  int         stack_depth()       const { return _caller_jvms ? _caller_jvms->depth() : 0; }
 
 public:
   static InlineTree* build_inline_tree_root();
   static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee, bool create_if_not_found = false);
 
   // For temporary (stack-allocated, stateless) ilts:
-  InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio);
+  InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio, int site_depth_adjust);
 
   // InlineTree enum
   enum InlineStyle {
@@ -427,6 +430,11 @@
     }
   }
 
+  // Return true if the parser should add a loop predicate
+  bool should_add_predicate(int target_bci);
+  // Insert a loop predicate into the graph
+  void add_predicate();
+
   // Note:  Intrinsic generation routines may be found in library_call.cpp.
 
   // Helper function to setup Ideal Call nodes
@@ -488,7 +496,7 @@
 
   void    do_ifnull(BoolTest::mask btest, Node* c);
   void    do_if(BoolTest::mask btest, Node* c);
-  void    repush_if_args();
+  int     repush_if_args();
   void    adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
                               Block* path, Block* other_path);
   IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask);
--- a/src/share/vm/opto/parse1.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/parse1.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -231,12 +231,13 @@
 
   // Use the raw liveness computation to make sure that unexpected
   // values don't propagate into the OSR frame.
-  MethodLivenessResult live_locals = method()->raw_liveness_at_bci(osr_bci());
+  MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
   if (!live_locals.is_valid()) {
     // Degenerate or breakpointed method.
     C->record_method_not_compilable("OSR in empty or breakpointed method");
     return;
   }
+  MethodLivenessResult raw_live_locals = method()->raw_liveness_at_bci(osr_bci());
 
   // Extract the needed locals from the interpreter frame.
   Node *locals_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals-1)*wordSize);
@@ -316,6 +317,10 @@
         continue;
       }
     }
+    if (type->basic_type() == T_ADDRESS && !raw_live_locals.at(index)) {
+      // Skip type check for dead address locals
+      continue;
+    }
     set_local(index, check_interpreter_type(l, type, bad_type_exit));
   }
 
@@ -819,7 +824,6 @@
   case Bytecodes::_ddiv:
   case Bytecodes::_checkcast:
   case Bytecodes::_instanceof:
-  case Bytecodes::_athrow:
   case Bytecodes::_anewarray:
   case Bytecodes::_newarray:
   case Bytecodes::_multianewarray:
@@ -829,6 +833,8 @@
     return true;
     break;
 
+  // Don't rerun athrow since it's part of the exception path.
+  case Bytecodes::_athrow:
   case Bytecodes::_invokestatic:
   case Bytecodes::_invokedynamic:
   case Bytecodes::_invokespecial:
@@ -1378,6 +1384,10 @@
     set_parse_bci(iter().cur_bci());
 
     if (bci() == block()->limit()) {
+      // insert a predicate if it falls through to a loop head block
+      if (should_add_predicate(bci())){
+        add_predicate();
+      }
       // Do not walk into the next block until directed by do_all_blocks.
       merge(bci());
       break;
@@ -2078,6 +2088,37 @@
   }
 }
 
+//------------------------------should_add_predicate--------------------------
+bool Parse::should_add_predicate(int target_bci) {
+  if (!UseLoopPredicate) return false;
+  Block* target = successor_for_bci(target_bci);
+  if (target != NULL          &&
+      target->is_loop_head()  &&
+      block()->rpo() < target->rpo()) {
+    return true;
+  }
+  return false;
+}
+
+//------------------------------add_predicate---------------------------------
+void Parse::add_predicate() {
+  assert(UseLoopPredicate,"use only for loop predicate");
+  Node *cont    = _gvn.intcon(1);
+  Node* opq     = _gvn.transform(new (C, 2) Opaque1Node(C, cont));
+  Node *bol     = _gvn.transform(new (C, 2) Conv2BNode(opq));
+  IfNode* iff   = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN);
+  Node* iffalse = _gvn.transform(new (C, 1) IfFalseNode(iff));
+  C->add_predicate_opaq(opq);
+  {
+    PreserveJVMState pjvms(this);
+    set_control(iffalse);
+    uncommon_trap(Deoptimization::Reason_predicate,
+                  Deoptimization::Action_maybe_recompile);
+  }
+  Node* iftrue = _gvn.transform(new (C, 1) IfTrueNode(iff));
+  set_control(iftrue);
+}
+
 #ifndef PRODUCT
 //------------------------show_parse_info--------------------------------------
 void Parse::show_parse_info() {
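
The graph emitted by add_predicate() is deliberately minimal; in sketch form:

    //   ConI(1) -> Opaque1 -> Conv2B -> If (PROB_MAX)
    //                                    |- IfTrue  -> parsing continues
    //                                    |- IfFalse -> uncommon_trap(
    //                                         Reason_predicate,
    //                                         Action_maybe_recompile)
    //
    // The Opaque1 is recorded via C->add_predicate_opaq(), which is how the
    // loop optimizer later recognizes this If as a predicate placeholder it
    // can hoist loop-invariant checks above.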
--- a/src/share/vm/opto/parse2.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/parse2.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -278,6 +278,11 @@
   if (len < 1) {
     // If this is a backward branch, add safepoint
     maybe_add_safepoint(default_dest);
+    if (should_add_predicate(default_dest)){
+      _sp += 1; // set original stack for use by uncommon_trap
+      add_predicate();
+      _sp -= 1;
+    }
     merge(default_dest);
     return;
   }
@@ -324,6 +329,11 @@
 
   if (len < 1) {    // If this is a backward branch, add safepoint
     maybe_add_safepoint(default_dest);
+    if (should_add_predicate(default_dest)){
+      _sp += 1; // set original stack for use by uncommon_trap
+      add_predicate();
+      _sp -= 1;
+    }
     merge(default_dest);
     return;
   }
@@ -731,6 +741,9 @@
   push(_gvn.makecon(ret_addr));
 
   // Flow to the jsr.
+  if (should_add_predicate(jsr_bci)){
+    add_predicate();
+  }
   merge(jsr_bci);
 }
 
@@ -881,7 +894,7 @@
 
 //-------------------------------repush_if_args--------------------------------
 // Push arguments of an "if" bytecode back onto the stack by adjusting _sp.
-inline void Parse::repush_if_args() {
+inline int Parse::repush_if_args() {
 #ifndef PRODUCT
   if (PrintOpto && WizardMode) {
     tty->print("defending against excessive implicit null exceptions on %s @%d in ",
@@ -895,6 +908,7 @@
   assert(argument(0) != NULL, "must exist");
   assert(bc_depth == 1 || argument(1) != NULL, "two must exist");
   _sp += bc_depth;
+  return bc_depth;
 }
 
 //----------------------------------do_ifnull----------------------------------
@@ -954,8 +968,14 @@
       // Update method data
       profile_taken_branch(target_bci);
       adjust_map_after_if(btest, c, prob, branch_block, next_block);
-      if (!stopped())
+      if (!stopped()) {
+        if (should_add_predicate(target_bci)){ // add a predicate if it branches to a loop
+          int nargs = repush_if_args(); // set original stack for uncommon_trap
+          add_predicate();
+          _sp -= nargs;
+        }
         merge(target_bci);
+      }
     }
   }
 
@@ -1076,8 +1096,14 @@
       // Update method data
       profile_taken_branch(target_bci);
       adjust_map_after_if(taken_btest, c, prob, branch_block, next_block);
-      if (!stopped())
+      if (!stopped()) {
+        if (should_add_predicate(target_bci)){ // add a predicate if it branches to a loop
+          int nargs = repush_if_args(); // set original stack for the uncommon_trap
+          add_predicate();
+          _sp -= nargs;
+        }
         merge(target_bci);
+      }
     }
   }
 
@@ -2053,13 +2079,6 @@
     // null exception oop throws NULL pointer exception
     do_null_check(peek(), T_OBJECT);
     if (stopped())  return;
-    if (env()->jvmti_can_post_exceptions()) {
-      // "Full-speed throwing" is not necessary here,
-      // since we're notifying the VM on every throw.
-      uncommon_trap(Deoptimization::Reason_unhandled,
-                    Deoptimization::Action_none);
-      return;
-    }
     // Hook the thrown exception directly to subsequent handlers.
     if (BailoutToInterpreterForThrows) {
       // Keep method interpreted from now on.
@@ -2067,6 +2086,11 @@
                     Deoptimization::Action_make_not_compilable);
       return;
     }
+    if (env()->jvmti_can_post_on_exceptions()) {
+      // check if we must post exception events, take uncommon trap if so (with must_throw = false)
+      uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false);
+    }
+    // Here if either can_post_on_exceptions or should_post_on_exceptions is false
     add_exception_state(make_exception_state(peek()));
     break;
 
@@ -2080,6 +2104,10 @@
     // Update method data
     profile_taken_branch(target_bci);
 
+    // Add loop predicate if it goes to a loop
+    if (should_add_predicate(target_bci)){
+      add_predicate();
+    }
     // Merge the current control into the target basic block
     merge(target_bci);
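
The branch sites above all follow one stack discipline around the new predicate; condensed:

    int nargs = repush_if_args(); // put the if's operands back: _sp += 1 or 2
    add_predicate();              // its uncommon trap sees the original stack
    _sp -= nargs;                 // pop them again and continue parsing

The switch sites do the same with an explicit _sp += 1 / _sp -= 1, and the jsr and goto sites need no adjustment.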
 
--- a/src/share/vm/opto/parse3.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/parse3.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -125,7 +125,25 @@
 
 void Parse::do_get_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool is_field) {
   // Does this field have a constant value?  If so, just push the value.
-  if (field->is_constant() && push_constant(field->constant_value()))  return;
+  if (field->is_constant()) {
+    if (field->is_static()) {
+      // final static field
+      if (push_constant(field->constant_value()))
+        return;
+    }
+    else {
+      // final non-static field of a trusted class ({java,sun}.dyn
+      // classes).
+      if (obj->is_Con()) {
+        const TypeOopPtr* oop_ptr = obj->bottom_type()->isa_oopptr();
+        ciObject* constant_oop = oop_ptr->const_oop();
+        ciConstant constant = field->constant_value_of(constant_oop);
+
+        if (push_constant(constant, true))
+          return;
+      }
+    }
+  }
 
   ciType* field_klass = field->type();
   bool is_vol = field->is_volatile();
@@ -145,7 +163,7 @@
     if (!field->type()->is_loaded()) {
       type = TypeInstPtr::BOTTOM;
       must_assert_null = true;
-    } else if (field->is_constant()) {
+    } else if (field->is_constant() && field->is_static()) {
       // This can happen if the constant oop is non-perm.
       ciObject* con = field->constant_value().as_object();
       // Do not "join" in the previous type; it doesn't add value,
@@ -240,19 +258,19 @@
     // membar is dependent on the store, keeping any other membars generated
     // below from floating up past the store.
     int adr_idx = C->get_alias_index(adr_type);
-    insert_mem_bar_volatile(Op_MemBarVolatile, adr_idx);
+    insert_mem_bar_volatile(Op_MemBarVolatile, adr_idx, store);
 
     // Now place a membar for AliasIdxBot for the unknown yet-to-be-parsed
     // volatile alias indices. Skip this if the membar is redundant.
     if (adr_idx != Compile::AliasIdxBot) {
-      insert_mem_bar_volatile(Op_MemBarVolatile, Compile::AliasIdxBot);
+      insert_mem_bar_volatile(Op_MemBarVolatile, Compile::AliasIdxBot, store);
     }
 
     // Finally, place alias-index-specific membars for each volatile index
     // that isn't the adr_idx membar. Typically there's only 1 or 2.
     for( int i = Compile::AliasIdxRaw; i < C->num_alias_types(); i++ ) {
       if (i != adr_idx && C->alias_type(i)->is_volatile()) {
-        insert_mem_bar_volatile(Op_MemBarVolatile, i);
+        insert_mem_bar_volatile(Op_MemBarVolatile, i, store);
       }
     }
   }
@@ -421,8 +439,18 @@
 
   // Can use multianewarray instead of [a]newarray if only one dimension,
   // or if all non-final dimensions are small constants.
-  if (expand_count == 1 || (1 <= expand_count && expand_count <= expand_limit)) {
-    Node* obj = expand_multianewarray(array_klass, &length[0], ndimensions, ndimensions);
+  if (ndimensions == 1 || (1 <= expand_count && expand_count <= expand_limit)) {
+    Node* obj = NULL;
+    // Set the original stack and the reexecute bit for the interpreter
+    // to reexecute the multianewarray bytecode if deoptimization happens.
+    // Do it unconditionally even for one dimension multianewarray.
+    // Note: the reexecute bit will be set in GraphKit::add_safepoint_edges()
+    // when AllocateArray node for newarray is created.
+    { PreserveReexecuteState preexecs(this);
+      _sp += ndimensions;
+      // Pass 0 as nargs since uncommon trap code does not need to restore stack.
+      obj = expand_multianewarray(array_klass, &length[0], ndimensions, 0);
+    } // original reexecute and sp are set back here
     push(obj);
     return;
   }
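
The PreserveReexecuteState scope above changes what a deoptimization inside it observes; in sketch form:

    // With the scope active: the interpreter resumes AT the multianewarray
    // bytecode (reexecute bit set), with all 'ndimensions' length operands
    // back on the expression stack, rather than after the bytecode with a
    // half-built array. Both _sp and the reexecute bit are restored when
    // the block exits.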
--- a/src/share/vm/opto/parseHelper.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/parseHelper.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -221,6 +221,14 @@
 
   // Push resultant oop onto stack
   push(obj);
+
+  // Keep track of whether opportunities exist for StringBuilder
+  // optimizations.
+  if (OptimizeStringConcat &&
+      (klass == C->env()->StringBuilder_klass() ||
+       klass == C->env()->StringBuffer_klass())) {
+    C->set_has_stringbuilder(true);
+  }
 }
 
 #ifndef PRODUCT
@@ -406,8 +414,6 @@
 void Parse::profile_call(Node* receiver) {
   if (!method_data_update()) return;
 
-  profile_generic_call();
-
   switch (bc()) {
   case Bytecodes::_invokevirtual:
   case Bytecodes::_invokeinterface:
@@ -416,6 +422,7 @@
   case Bytecodes::_invokestatic:
   case Bytecodes::_invokedynamic:
   case Bytecodes::_invokespecial:
+    profile_generic_call();
     break;
   default: fatal("unexpected call bytecode");
   }
@@ -436,13 +443,16 @@
 void Parse::profile_receiver_type(Node* receiver) {
   assert(method_data_update(), "must be generating profile code");
 
-  // Skip if we aren't tracking receivers
-  if (TypeProfileWidth < 1) return;
-
   ciMethodData* md = method()->method_data();
   assert(md != NULL, "expected valid ciMethodData");
   ciProfileData* data = md->bci_to_data(bci());
   assert(data->is_ReceiverTypeData(), "need ReceiverTypeData here");
+
+  // Skip if we aren't tracking receivers
+  if (TypeProfileWidth < 1) {
+    increment_md_counter_at(md, data, CounterData::count_offset());
+    return;
+  }
   ciReceiverTypeData* rdata = (ciReceiverTypeData*)data->as_ReceiverTypeData();
 
   Node* method_data = method_data_addressing(md, rdata, in_ByteSize(0));
--- a/src/share/vm/opto/phase.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/phase.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -44,6 +44,7 @@
     BlockLayout,                // Linear ordering of blocks
     Register_Allocation,        // Register allocation, duh
     LIVE,                       // Dragon-book LIVE range problem
+    StringOpts,                 // StringBuilder related optimizations
     Interference_Graph,         // Building the IFG
     Coalesce,                   // Coalescing copies
     Ideal_Loop,                 // Find idealized trip-counted loops
--- a/src/share/vm/opto/phaseX.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/phaseX.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -345,7 +345,11 @@
   Node  *hash_find(const Node *n) { return _table.hash_find(n); }
 
   // Used after parsing to eliminate values that are no longer in program
-  void   remove_useless_nodes(VectorSet &useful) { _table.remove_useless_nodes(useful); }
+  void   remove_useless_nodes(VectorSet &useful) {
+    _table.remove_useless_nodes(useful);
+    // this may invalidate cached cons so reset the cache
+    init_con_caches();
+  }
 
   virtual ConNode* uncached_makecon(const Type* t);  // override from PhaseTransform
 
--- a/src/share/vm/opto/runtime.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/runtime.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -143,7 +143,7 @@
 // We failed the fast-path allocation.  Now we need to do a scavenge or GC
 // and try allocation again.
 
-void OptoRuntime::maybe_defer_card_mark(JavaThread* thread) {
+void OptoRuntime::new_store_pre_barrier(JavaThread* thread) {
   // After any safepoint, just before going back to compiled code,
   // we inform the GC that we will be doing initializing writes to
   // this object in the future without emitting card-marks, so
@@ -156,7 +156,7 @@
   assert(Universe::heap()->can_elide_tlab_store_barriers(),
          "compiler must check this first");
   // GC may decide to give back a safer copy of new_obj.
-  new_obj = Universe::heap()->defer_store_barrier(thread, new_obj);
+  new_obj = Universe::heap()->new_store_pre_barrier(thread, new_obj);
   thread->set_vm_result(new_obj);
 }
 
@@ -200,7 +200,7 @@
 
   if (GraphKit::use_ReduceInitialCardMarks()) {
     // inform GC that we won't do card marks for initializing writes.
-    maybe_defer_card_mark(thread);
+    new_store_pre_barrier(thread);
   }
 JRT_END
 
@@ -239,7 +239,7 @@
 
   if (GraphKit::use_ReduceInitialCardMarks()) {
     // inform GC that we won't do card marks for initializing writes.
-    maybe_defer_card_mark(thread);
+    new_store_pre_barrier(thread);
   }
 JRT_END
 
@@ -706,6 +706,11 @@
     // vc->set_receiver_count(empty_row, DataLayout::counter_increment);
     int count_off = ReceiverTypeData::receiver_count_cell_index(empty_row);
     *(mdp + count_off) = DataLayout::counter_increment;
+  } else {
+    // Receiver did not match any saved receiver and there is no empty row for it.
+    // Increment total counter to indicate polymorphic case.
+    intptr_t* count_p = (intptr_t*)(((byte*)(data)) + in_bytes(CounterData::count_offset()));
+    *count_p += DataLayout::counter_increment;
   }
 JRT_END
 
@@ -790,7 +795,7 @@
   NOT_PRODUCT(Exceptions::debug_check_abort(exception));
 
   #ifdef ASSERT
-    if (!(exception->is_a(SystemDictionary::throwable_klass()))) {
+    if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
       // should throw an exception here
       ShouldNotReachHere();
     }
@@ -810,7 +815,7 @@
     // we are switching to old paradigm: search for exception handler in caller_frame
     // instead in exception handler of caller_frame.sender()
 
-    if (JvmtiExport::can_post_exceptions()) {
+    if (JvmtiExport::can_post_on_exceptions()) {
       // "Full-speed catching" is not necessary here,
       // since we're notifying the VM on every catch.
       // Force deoptimization and the rest of the lookup
@@ -858,6 +863,9 @@
     thread->set_exception_pc(pc);
     thread->set_exception_handler_pc(handler_address);
     thread->set_exception_stack_size(0);
+
+    // Check if the exception PC is a MethodHandle call.
+    thread->set_is_method_handle_exception(nm->is_method_handle_return(pc));
   }
 
   // Restore correct return pc.  Was saved above.
@@ -936,7 +944,7 @@
 #endif
   assert (exception != NULL, "should have thrown a NULLPointerException");
 #ifdef ASSERT
-  if (!(exception->is_a(SystemDictionary::throwable_klass()))) {
+  if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
     // should throw an exception here
     ShouldNotReachHere();
   }
@@ -972,8 +980,8 @@
     assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
     frame caller_frame = stub_frame.sender(&reg_map);
 
-    VM_DeoptimizeFrame deopt(thread, caller_frame.id());
-    VMThread::execute(&deopt);
+    // bypass VM_DeoptimizeFrame and deoptimize the frame directly
+    Deoptimization::deoptimize_frame(thread, caller_frame.id());
   }
 }
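
Together with the parseHelper.cpp change above, receiver profiling now degrades to a plain counter instead of dropping samples; the effective row policy, in sketch form:

    // if a row already holds this receiver:  row.count  += counter_increment
    // else if an empty row exists:           row.receiver = receiver,
    //                                        row.count    = counter_increment
    // else (table full, polymorphic case):   data.count  += counter_increment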
 
--- a/src/share/vm/opto/runtime.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/runtime.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -133,8 +133,9 @@
   // Allocate storage for a objArray or typeArray
   static void new_array_C(klassOopDesc* array_klass, int len, JavaThread *thread);
 
-  // Post-slow-path-allocation step for implementing ReduceInitialCardMarks:
-  static void maybe_defer_card_mark(JavaThread* thread);
+  // Post-slow-path-allocation, pre-initializing-stores step for
+  // implementing ReduceInitialCardMarks
+  static void new_store_pre_barrier(JavaThread* thread);
 
   // Allocate storage for a multi-dimensional arrays
   // Note: needs to be fixed for arbitrary number of dimensions
--- a/src/share/vm/opto/split_if.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/split_if.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -219,6 +219,7 @@
 
 //------------------------------register_new_node------------------------------
 void PhaseIdealLoop::register_new_node( Node *n, Node *blk ) {
+  assert(!n->is_CFG(), "must be data node");
   _igvn.register_new_node_with_optimizer(n);
   set_ctrl(n, blk);
   IdealLoopTree *loop = get_loop(blk);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/opto/stringopts.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -0,0 +1,1396 @@
+/*
+ * Copyright 2009-2010 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_stringopts.cpp.incl"
+
+#define __ kit.
+
+class StringConcat : public ResourceObj {
+ private:
+  PhaseStringOpts*    _stringopts;
+  Node*               _string_alloc;
+  AllocateNode*       _begin;          // The allocation that begins the pattern
+  CallStaticJavaNode* _end;            // The final call of the pattern.  Will either be
+                                       // SB.toString or String.<init>(SB.toString)
+  bool                _multiple;       // indicates this is a fusion of two or more
+                                       // separate StringBuilders
+
+  Node*               _arguments;      // The list of arguments to be concatenated
+  GrowableArray<int>  _mode;           // into a String along with a mode flag
+                                       // indicating how to treat the value.
+
+  Node_List           _control;        // List of control nodes that will be deleted
+  Node_List           _uncommon_traps; // Uncommon traps that need to be rewritten
+                                       // to restart at the initial JVMState.
+ public:
+  // Mode for converting arguments to Strings
+  enum {
+    StringMode,
+    IntMode,
+    CharMode
+  };
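+  // For example, javac compiles the Java expression "x" + i + 'y' into
+  // roughly new StringBuilder().append("x").append(i).append('y').toString(),
+  // which this pass records as a StringMode, an IntMode and a CharMode
+  // argument respectively.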
+
+  StringConcat(PhaseStringOpts* stringopts, CallStaticJavaNode* end):
+    _end(end),
+    _begin(NULL),
+    _multiple(false),
+    _string_alloc(NULL),
+    _stringopts(stringopts) {
+    _arguments = new (_stringopts->C, 1) Node(1);
+    _arguments->del_req(0);
+  }
+
+  bool validate_control_flow();
+
+  void merge_add() {
+#if 0
+    // XXX This is placeholder code for reusing an existing String
+    // allocation but the logic for checking the state safety is
+    // probably inadequate at the moment.
+    CallProjections endprojs;
+    sc->end()->extract_projections(&endprojs, false);
+    if (endprojs.resproj != NULL) {
+      for (SimpleDUIterator i(endprojs.resproj); i.has_next(); i.next()) {
+        CallStaticJavaNode *use = i.get()->isa_CallStaticJava();
+        if (use != NULL && use->method() != NULL &&
+            use->method()->holder() == C->env()->String_klass() &&
+            use->method()->name() == ciSymbol::object_initializer_name() &&
+            use->in(TypeFunc::Parms + 1) == endprojs.resproj) {
+          // Found useless new String(sb.toString()) so reuse the newly allocated String
+          // when creating the result instead of allocating a new one.
+          sc->set_string_alloc(use->in(TypeFunc::Parms));
+          sc->set_end(use);
+        }
+      }
+    }
+#endif
+  }
+
+  StringConcat* merge(StringConcat* other, Node* arg);
+
+  void set_allocation(AllocateNode* alloc) {
+    _begin = alloc;
+  }
+
+  void append(Node* value, int mode) {
+    _arguments->add_req(value);
+    _mode.append(mode);
+  }
+  void push(Node* value, int mode) {
+    _arguments->ins_req(0, value);
+    _mode.insert_before(0, mode);
+  }
+  void push_string(Node* value) {
+    push(value, StringMode);
+  }
+  void push_int(Node* value) {
+    push(value, IntMode);
+  }
+  void push_char(Node* value) {
+    push(value, CharMode);
+  }
+
+  Node* argument(int i) {
+    return _arguments->in(i);
+  }
+  void set_argument(int i, Node* value) {
+    _arguments->set_req(i, value);
+  }
+  int num_arguments() {
+    return _mode.length();
+  }
+  int mode(int i) {
+    return _mode.at(i);
+  }
+  void add_control(Node* ctrl) {
+    assert(!_control.contains(ctrl), "only push once");
+    _control.push(ctrl);
+  }
+  CallStaticJavaNode* end() { return _end; }
+  AllocateNode* begin() { return _begin; }
+  Node* string_alloc() { return _string_alloc; }
+
+  void eliminate_unneeded_control();
+  void eliminate_initialize(InitializeNode* init);
+  void eliminate_call(CallNode* call);
+
+  void maybe_log_transform() {
+    CompileLog* log = _stringopts->C->log();
+    if (log != NULL) {
+      log->head("replace_string_concat arguments='%d' string_alloc='%d' multiple='%d'",
+                num_arguments(),
+                _string_alloc != NULL,
+                _multiple);
+      JVMState* p = _begin->jvms();
+      while (p != NULL) {
+        log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
+        p = p->caller();
+      }
+      log->tail("replace_string_concat");
+    }
+  }
+
+  void convert_uncommon_traps(GraphKit& kit, const JVMState* jvms) {
+    for (uint u = 0; u < _uncommon_traps.size(); u++) {
+      Node* uct = _uncommon_traps.at(u);
+
+      // Build a new call using the jvms state of the allocate
+      address call_addr = SharedRuntime::uncommon_trap_blob()->instructions_begin();
+      const TypeFunc* call_type = OptoRuntime::uncommon_trap_Type();
+      int size = call_type->domain()->cnt();
+      const TypePtr* no_memory_effects = NULL;
+      Compile* C = _stringopts->C;
+      CallStaticJavaNode* call = new (C, size) CallStaticJavaNode(call_type, call_addr, "uncommon_trap",
+                                                                  jvms->bci(), no_memory_effects);
+      for (int e = 0; e < TypeFunc::Parms; e++) {
+        call->init_req(e, uct->in(e));
+      }
+      // Set the trap request to record intrinsic failure if this trap
+      // is taken too many times.  Ideally we would handle these traps by
+      // doing the original bookkeeping in the MDO so that if it caused
+      // the code to be thrown out we could still recompile and use the
+      // optimization.  Failing the uncommon traps doesn't really mean
+      // that the optimization is a bad idea but there's no other way to
+      // do the MDO updates currently.
+      int trap_request = Deoptimization::make_trap_request(Deoptimization::Reason_intrinsic,
+                                                           Deoptimization::Action_make_not_entrant);
+      call->init_req(TypeFunc::Parms, __ intcon(trap_request));
+      kit.add_safepoint_edges(call);
+
+      _stringopts->gvn()->transform(call);
+      C->gvn_replace_by(uct, call);
+      uct->disconnect_inputs(NULL);
+    }
+  }
+
+  void cleanup() {
+    // disconnect the hook node
+    _arguments->disconnect_inputs(NULL);
+  }
+};
+
+
+void StringConcat::eliminate_unneeded_control() {
+  eliminate_initialize(begin()->initialization());
+  for (uint i = 0; i < _control.size(); i++) {
+    Node* n = _control.at(i);
+    if (n->is_Call()) {
+      if (n != _end) {
+        eliminate_call(n->as_Call());
+      }
+    } else if (n->is_IfTrue()) {
+      Compile* C = _stringopts->C;
+      C->gvn_replace_by(n, n->in(0)->in(0));
+      C->gvn_replace_by(n->in(0), C->top());
+    }
+  }
+}
+
+
+StringConcat* StringConcat::merge(StringConcat* other, Node* arg) {
+  StringConcat* result = new StringConcat(_stringopts, _end);
+  for (uint x = 0; x < _control.size(); x++) {
+    Node* n = _control.at(x);
+    if (n->is_Call()) {
+      result->_control.push(n);
+    }
+  }
+  for (uint x = 0; x < other->_control.size(); x++) {
+    Node* n = other->_control.at(x);
+    if (n->is_Call()) {
+      result->_control.push(n);
+    }
+  }
+  assert(result->_control.contains(other->_end), "what?");
+  assert(result->_control.contains(_begin), "what?");
+  for (int x = 0; x < num_arguments(); x++) {
+    if (argument(x) == arg) {
+      // replace the toString result with all the arguments that
+      // made up the other StringConcat
+      for (int y = 0; y < other->num_arguments(); y++) {
+        result->append(other->argument(y), other->mode(y));
+      }
+    } else {
+      result->append(argument(x), mode(x));
+    }
+  }
+  result->set_allocation(other->_begin);
+  result->_multiple = true;
+  return result;
+}
+
+
+void StringConcat::eliminate_call(CallNode* call) {
+  Compile* C = _stringopts->C;
+  CallProjections projs;
+  call->extract_projections(&projs, false);
+  if (projs.fallthrough_catchproj != NULL) {
+    C->gvn_replace_by(projs.fallthrough_catchproj, call->in(TypeFunc::Control));
+  }
+  if (projs.fallthrough_memproj != NULL) {
+    C->gvn_replace_by(projs.fallthrough_memproj, call->in(TypeFunc::Memory));
+  }
+  if (projs.catchall_memproj != NULL) {
+    C->gvn_replace_by(projs.catchall_memproj, C->top());
+  }
+  if (projs.fallthrough_ioproj != NULL) {
+    C->gvn_replace_by(projs.fallthrough_ioproj, call->in(TypeFunc::I_O));
+  }
+  if (projs.catchall_ioproj != NULL) {
+    C->gvn_replace_by(projs.catchall_ioproj, C->top());
+  }
+  if (projs.catchall_catchproj != NULL) {
+    // EA can't cope with the partially collapsed graph this
+    // creates so put it on the worklist to be collapsed later.
+    for (SimpleDUIterator i(projs.catchall_catchproj); i.has_next(); i.next()) {
+      Node *use = i.get();
+      int opc = use->Opcode();
+      if (opc == Op_CreateEx || opc == Op_Region) {
+        _stringopts->record_dead_node(use);
+      }
+    }
+    C->gvn_replace_by(projs.catchall_catchproj, C->top());
+  }
+  if (projs.resproj != NULL) {
+    C->gvn_replace_by(projs.resproj, C->top());
+  }
+  C->gvn_replace_by(call, C->top());
+}
+
+void StringConcat::eliminate_initialize(InitializeNode* init) {
+  Compile* C = _stringopts->C;
+
+  // Eliminate Initialize node.
+  assert(init->outcnt() <= 2, "only a control and memory projection expected");
+  assert(init->req() <= InitializeNode::RawStores, "no pending inits");
+  Node *ctrl_proj = init->proj_out(TypeFunc::Control);
+  if (ctrl_proj != NULL) {
+    C->gvn_replace_by(ctrl_proj, init->in(TypeFunc::Control));
+  }
+  Node *mem_proj = init->proj_out(TypeFunc::Memory);
+  if (mem_proj != NULL) {
+    Node *mem = init->in(TypeFunc::Memory);
+    C->gvn_replace_by(mem_proj, mem);
+  }
+  C->gvn_replace_by(init, C->top());
+  init->disconnect_inputs(NULL);
+}
+
+Node_List PhaseStringOpts::collect_toString_calls() {
+  Node_List string_calls;
+  Node_List worklist;
+
+  _visited.Clear();
+
+  // Prime the worklist
+  for (uint i = 1; i < C->root()->len(); i++) {
+    Node* n = C->root()->in(i);
+    if (n != NULL && !_visited.test_set(n->_idx)) {
+      worklist.push(n);
+    }
+  }
+
+  while (worklist.size() > 0) {
+    Node* ctrl = worklist.pop();
+    if (ctrl->is_CallStaticJava()) {
+      CallStaticJavaNode* csj = ctrl->as_CallStaticJava();
+      ciMethod* m = csj->method();
+      if (m != NULL &&
+          (m->intrinsic_id() == vmIntrinsics::_StringBuffer_toString ||
+           m->intrinsic_id() == vmIntrinsics::_StringBuilder_toString)) {
+        string_calls.push(csj);
+      }
+    }
+    if (ctrl->in(0) != NULL && !_visited.test_set(ctrl->in(0)->_idx)) {
+      worklist.push(ctrl->in(0));
+    }
+    if (ctrl->is_Region()) {
+      for (uint i = 1; i < ctrl->len(); i++) {
+        if (ctrl->in(i) != NULL && !_visited.test_set(ctrl->in(i)->_idx)) {
+          worklist.push(ctrl->in(i));
+        }
+      }
+    }
+  }
+  return string_calls;
+}
+
+
+StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) {
+  ciMethod* m = call->method();
+  ciSymbol* string_sig;
+  ciSymbol* int_sig;
+  ciSymbol* char_sig;
+  if (m->holder() == C->env()->StringBuilder_klass()) {
+    string_sig = ciSymbol::String_StringBuilder_signature();
+    int_sig = ciSymbol::int_StringBuilder_signature();
+    char_sig = ciSymbol::char_StringBuilder_signature();
+  } else if (m->holder() == C->env()->StringBuffer_klass()) {
+    string_sig = ciSymbol::String_StringBuffer_signature();
+    int_sig = ciSymbol::int_StringBuffer_signature();
+    char_sig = ciSymbol::char_StringBuffer_signature();
+  } else {
+    return NULL;
+  }
+#ifndef PRODUCT
+  if (PrintOptimizeStringConcat) {
+    tty->print("considering toString call in ");
+    call->jvms()->dump_spec(tty); tty->cr();
+  }
+#endif
+
+  StringConcat* sc = new StringConcat(this, call);
+
+  AllocateNode* alloc = NULL;
+  InitializeNode* init = NULL;
+
+  // possible opportunity for StringBuilder fusion
+  CallStaticJavaNode* cnode = call;
+  while (cnode) {
+    Node* recv = cnode->in(TypeFunc::Parms)->uncast();
+    if (recv->is_Proj()) {
+      recv = recv->in(0);
+    }
+    cnode = recv->isa_CallStaticJava();
+    if (cnode == NULL) {
+      alloc = recv->isa_Allocate();
+      if (alloc == NULL) {
+        break;
+      }
+      // Find the constructor call
+      Node* result = alloc->result_cast();
+      if (result == NULL || !result->is_CheckCastPP()) {
+        // strange looking allocation
+#ifndef PRODUCT
+        if (PrintOptimizeStringConcat) {
+          tty->print("giving up because allocation looks strange ");
+          alloc->jvms()->dump_spec(tty); tty->cr();
+        }
+#endif
+        break;
+      }
+      Node* constructor = NULL;
+      for (SimpleDUIterator i(result); i.has_next(); i.next()) {
+        CallStaticJavaNode *use = i.get()->isa_CallStaticJava();
+        if (use != NULL && use->method() != NULL &&
+            use->method()->name() == ciSymbol::object_initializer_name() &&
+            use->method()->holder() == m->holder()) {
+          // Matched the constructor.
+          ciSymbol* sig = use->method()->signature()->as_symbol();
+          if (sig == ciSymbol::void_method_signature() ||
+              sig == ciSymbol::int_void_signature() ||
+              sig == ciSymbol::string_void_signature()) {
+            if (sig == ciSymbol::string_void_signature()) {
+              // StringBuilder(String) so pick this up as the first argument
+              assert(use->in(TypeFunc::Parms + 1) != NULL, "what?");
+              sc->push_string(use->in(TypeFunc::Parms + 1));
+            }
+            // The int variant takes an initial size for the backing
+            // array so just treat it like the void version.
+            constructor = use;
+          } else {
+#ifndef PRODUCT
+            if (PrintOptimizeStringConcat) {
+              tty->print("unexpected constructor signature: %s", sig->as_utf8());
+            }
+#endif
+          }
+          break;
+        }
+      }
+      if (constructor == NULL) {
+        // couldn't find constructor
+#ifndef PRODUCT
+        if (PrintOptimizeStringConcat) {
+          tty->print("giving up because couldn't find constructor ");
+          alloc->jvms()->dump_spec(tty);
+        }
+#endif
+        break;
+      }
+
+      // Walked all the way back and found the constructor call so see
+      // if this call can be converted into a direct string concatenation.
+      sc->add_control(call);
+      sc->add_control(constructor);
+      sc->add_control(alloc);
+      sc->set_allocation(alloc);
+      if (sc->validate_control_flow()) {
+        return sc;
+      } else {
+        return NULL;
+      }
+    } else if (cnode->method() == NULL) {
+      break;
+    } else if (cnode->method()->holder() == m->holder() &&
+               cnode->method()->name() == ciSymbol::append_name() &&
+               (cnode->method()->signature()->as_symbol() == string_sig ||
+                cnode->method()->signature()->as_symbol() == char_sig ||
+                cnode->method()->signature()->as_symbol() == int_sig)) {
+      sc->add_control(cnode);
+      Node* arg = cnode->in(TypeFunc::Parms + 1);
+      if (cnode->method()->signature()->as_symbol() == int_sig) {
+        sc->push_int(arg);
+      } else if (cnode->method()->signature()->as_symbol() == char_sig) {
+        sc->push_char(arg);
+      } else {
+        if (arg->is_Proj() && arg->in(0)->is_CallStaticJava()) {
+          CallStaticJavaNode* csj = arg->in(0)->as_CallStaticJava();
+          if (csj->method() != NULL &&
+              csj->method()->holder() == C->env()->Integer_klass() &&
+              csj->method()->name() == ciSymbol::toString_name()) {
+            sc->add_control(csj);
+            sc->push_int(csj->in(TypeFunc::Parms));
+            continue;
+          }
+        }
+        sc->push_string(arg);
+      }
+      continue;
+    } else {
+      // some unhandled signature
+#ifndef PRODUCT
+      if (PrintOptimizeStringConcat) {
+        tty->print("giving up because encountered unexpected signature ");
+        cnode->tf()->dump(); tty->cr();
+        cnode->in(TypeFunc::Parms + 1)->dump();
+      }
+#endif
+      break;
+    }
+  }
+  return NULL;
+}
+
+
+PhaseStringOpts::PhaseStringOpts(PhaseGVN* gvn, Unique_Node_List*):
+  Phase(StringOpts),
+  _gvn(gvn),
+  _visited(Thread::current()->resource_area()) {
+
+  assert(OptimizeStringConcat, "shouldn't be here");
+
+  size_table_field = C->env()->Integer_klass()->get_field_by_name(ciSymbol::make("sizeTable"),
+                                                                  ciSymbol::make("[I"), true);
+  if (size_table_field == NULL) {
+    // Something is wrong, so give up.
+    assert(false, "why can't we find Integer.sizeTable?");
+    return;
+  }
+
+  // Collect the types needed to talk about the various slices of memory
+  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
+                                                     false, NULL, 0);
+
+  const TypePtr* value_field_type = string_type->add_offset(java_lang_String::value_offset_in_bytes());
+  const TypePtr* offset_field_type = string_type->add_offset(java_lang_String::offset_offset_in_bytes());
+  const TypePtr* count_field_type = string_type->add_offset(java_lang_String::count_offset_in_bytes());
+
+  value_field_idx = C->get_alias_index(value_field_type);
+  count_field_idx = C->get_alias_index(count_field_type);
+  offset_field_idx = C->get_alias_index(offset_field_type);
+  char_adr_idx = C->get_alias_index(TypeAryPtr::CHARS);
+
+  // For each locally allocated StringBuffer see if the usages can be
+  // collapsed into a single String construction.
+
+  // Run through the list of allocations looking for SB.toString to see
+  // if it's possible to fuse the usage of the SB into a single String
+  // construction.
+  GrowableArray<StringConcat*> concats;
+  Node_List toStrings = collect_toString_calls();
+  while (toStrings.size() > 0) {
+    StringConcat* sc = build_candidate(toStrings.pop()->as_CallStaticJava());
+    if (sc != NULL) {
+      concats.push(sc);
+    }
+  }
+
+  // try to coalesce separate concats
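+  // For instance, a nested concatenation like s1 + (s2 + s3) compiles into
+  // two StringBuilder chains, with the inner toString result feeding an
+  // append of the outer one; merge() fuses such pairs into a single
+  // StringConcat.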
+ restart:
+  for (int c = 0; c < concats.length(); c++) {
+    StringConcat* sc = concats.at(c);
+    for (int i = 0; i < sc->num_arguments(); i++) {
+      Node* arg = sc->argument(i);
+      if (arg->is_Proj() && arg->in(0)->is_CallStaticJava()) {
+        CallStaticJavaNode* csj = arg->in(0)->as_CallStaticJava();
+        if (csj->method() != NULL &&
+            (csj->method()->holder() == C->env()->StringBuffer_klass() ||
+             csj->method()->holder() == C->env()->StringBuilder_klass()) &&
+            csj->method()->name() == ciSymbol::toString_name()) {
+          for (int o = 0; o < concats.length(); o++) {
+            if (c == o) continue;
+            StringConcat* other = concats.at(o);
+            if (other->end() == csj) {
+#ifndef PRODUCT
+              if (PrintOptimizeStringConcat) {
+                tty->print_cr("considering stacked concats");
+              }
+#endif
+
+              StringConcat* merged = sc->merge(other, arg);
+              if (merged->validate_control_flow()) {
+#ifndef PRODUCT
+                if (PrintOptimizeStringConcat) {
+                  tty->print_cr("stacking would succeed");
+                }
+#endif
+                if (c < o) {
+                  concats.remove_at(o);
+                  concats.at_put(c, merged);
+                } else {
+                  concats.remove_at(c);
+                  concats.at_put(o, merged);
+                }
+                goto restart;
+              } else {
+#ifndef PRODUCT
+                if (PrintOptimizeStringConcat) {
+                  tty->print_cr("stacking would fail");
+                }
+#endif
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+
+
+  for (int c = 0; c < concats.length(); c++) {
+    StringConcat* sc = concats.at(c);
+    replace_string_concat(sc);
+  }
+
+  remove_dead_nodes();
+}
+
+void PhaseStringOpts::record_dead_node(Node* dead) {
+  dead_worklist.push(dead);
+}
+
+void PhaseStringOpts::remove_dead_nodes() {
+  // Delete any dead nodes to make things clean enough that escape
+  // analysis doesn't get unhappy.
+  while (dead_worklist.size() > 0) {
+    Node* use = dead_worklist.pop();
+    int opc = use->Opcode();
+    switch (opc) {
+      case Op_Region: {
+        uint i = 1;
+        for (i = 1; i < use->req(); i++) {
+          if (use->in(i) != C->top()) {
+            break;
+          }
+        }
+        if (i >= use->req()) {
+          for (SimpleDUIterator i(use); i.has_next(); i.next()) {
+            Node* m = i.get();
+            if (m->is_Phi()) {
+              dead_worklist.push(m);
+            }
+          }
+          C->gvn_replace_by(use, C->top());
+        }
+        break;
+      }
+      case Op_AddP:
+      case Op_CreateEx: {
+        // Recursively clean up references to CreateEx so EA doesn't
+        // get unhappy about the partially collapsed graph.
+        for (SimpleDUIterator i(use); i.has_next(); i.next()) {
+          Node* m = i.get();
+          if (m->is_AddP()) {
+            dead_worklist.push(m);
+          }
+        }
+        C->gvn_replace_by(use, C->top());
+        break;
+      }
+      case Op_Phi:
+        if (use->in(0) == C->top()) {
+          C->gvn_replace_by(use, C->top());
+        }
+        break;
+    }
+  }
+}
+
+
+bool StringConcat::validate_control_flow() {
+  // We found all the calls and arguments; now let's see if it's
+  // safe to transform the graph as we would expect.
+
+  // Check to see if this resulted in too many uncommon traps previously
+  if (Compile::current()->too_many_traps(_begin->jvms()->method(), _begin->jvms()->bci(),
+                        Deoptimization::Reason_intrinsic)) {
+    return false;
+  }
+
+  // Walk backwards over the control flow from toString to the
+  // allocation and make sure all the control flow is ok.  This
+  // means it's either going to be eliminated once the calls are
+  // removed or it can safely be transformed into an uncommon
+  // trap.
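+  // The shapes accepted below are: the calls and projections collected in
+  // ctrl_path, null checks of append results, branches that lead to
+  // uncommon traps, and simple diamonds; any other control flow aborts the
+  // fusion.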
+
+  int null_check_count = 0;
+  Unique_Node_List ctrl_path;
+
+  assert(_control.contains(_begin), "missing");
+  assert(_control.contains(_end), "missing");
+
+  // Collect the nodes that we know about and will eliminate into ctrl_path
+  for (uint i = 0; i < _control.size(); i++) {
+    // Push the call and its control projection
+    Node* n = _control.at(i);
+    if (n->is_Allocate()) {
+      AllocateNode* an = n->as_Allocate();
+      InitializeNode* init = an->initialization();
+      ctrl_path.push(init);
+      ctrl_path.push(init->as_Multi()->proj_out(0));
+    }
+    if (n->is_Call()) {
+      CallNode* cn = n->as_Call();
+      ctrl_path.push(cn);
+      ctrl_path.push(cn->proj_out(0));
+      ctrl_path.push(cn->proj_out(0)->unique_out());
+      ctrl_path.push(cn->proj_out(0)->unique_out()->as_Catch()->proj_out(0));
+    } else {
+      ShouldNotReachHere();
+    }
+  }
+
+  // Skip backwards through the control checking for unexpected control flow
+  Node* ptr = _end;
+  bool fail = false;
+  while (ptr != _begin) {
+    if (ptr->is_Call() && ctrl_path.member(ptr)) {
+      ptr = ptr->in(0);
+    } else if (ptr->is_CatchProj() && ctrl_path.member(ptr)) {
+      ptr = ptr->in(0)->in(0)->in(0);
+      assert(ctrl_path.member(ptr), "should be a known piece of control");
+    } else if (ptr->is_IfTrue()) {
+      IfNode* iff = ptr->in(0)->as_If();
+      BoolNode* b = iff->in(1)->isa_Bool();
+      Node* cmp = b->in(1);
+      Node* v1 = cmp->in(1);
+      Node* v2 = cmp->in(2);
+      Node* otherproj = iff->proj_out(1 - ptr->as_Proj()->_con);
+
+      // Null check of the return value of append, which can simply be eliminated
+      if (b->_test._test == BoolTest::ne &&
+          v2->bottom_type() == TypePtr::NULL_PTR &&
+          v1->is_Proj() && ctrl_path.member(v1->in(0))) {
+        // NULL check of the return value of the append
+        null_check_count++;
+        if (otherproj->outcnt() == 1) {
+          CallStaticJavaNode* call = otherproj->unique_out()->isa_CallStaticJava();
+          if (call != NULL && call->_name != NULL && strcmp(call->_name, "uncommon_trap") == 0) {
+            ctrl_path.push(call);
+          }
+        }
+        _control.push(ptr);
+        ptr = ptr->in(0)->in(0);
+        continue;
+      }
+
+      // A test which leads to an uncommon trap, which should be safe.
+      // Later this trap will be converted into a trap that restarts
+      // at the beginning.
+      if (otherproj->outcnt() == 1) {
+        CallStaticJavaNode* call = otherproj->unique_out()->isa_CallStaticJava();
+        if (call != NULL && call->_name != NULL && strcmp(call->_name, "uncommon_trap") == 0) {
+          // control flow leads to uct so should be ok
+          _uncommon_traps.push(call);
+          ctrl_path.push(call);
+          ptr = ptr->in(0)->in(0);
+          continue;
+        }
+      }
+
+#ifndef PRODUCT
+      // Some unexpected control flow we don't know how to handle.
+      if (PrintOptimizeStringConcat) {
+        tty->print_cr("failing with unknown test");
+        b->dump();
+        cmp->dump();
+        v1->dump();
+        v2->dump();
+        tty->cr();
+      }
+#endif
+      break;
+    } else if (ptr->is_Proj() && ptr->in(0)->is_Initialize()) {
+      ptr = ptr->in(0)->in(0);
+    } else if (ptr->is_Region()) {
+      Node* copy = ptr->as_Region()->is_copy();
+      if (copy != NULL) {
+        ptr = copy;
+        continue;
+      }
+      if (ptr->req() == 3 &&
+          ptr->in(1) != NULL && ptr->in(1)->is_Proj() &&
+          ptr->in(2) != NULL && ptr->in(2)->is_Proj() &&
+          ptr->in(1)->in(0) == ptr->in(2)->in(0) &&
+          ptr->in(1)->in(0) != NULL && ptr->in(1)->in(0)->is_If()) {
+        // Simple diamond.
+        // XXX should check for possibly merging stores.  simple data merges are ok.
+        ptr = ptr->in(1)->in(0)->in(0);
+        continue;
+      }
+#ifndef PRODUCT
+      if (PrintOptimizeStringConcat) {
+        tty->print_cr("fusion would fail for region");
+        _begin->dump();
+        ptr->dump(2);
+      }
+#endif
+      fail = true;
+      break;
+    } else {
+      // other unknown control
+      if (!fail) {
+#ifndef PRODUCT
+        if (PrintOptimizeStringConcat) {
+          tty->print_cr("fusion would fail for");
+          _begin->dump();
+        }
+#endif
+        fail = true;
+      }
+#ifndef PRODUCT
+      if (PrintOptimizeStringConcat) {
+        ptr->dump();
+      }
+#endif
+      ptr = ptr->in(0);
+    }
+  }
+#ifndef PRODUCT
+  if (PrintOptimizeStringConcat && fail) {
+    tty->cr();
+  }
+#endif
+  if (fail) return !fail;
+
+  // Validate that all the results produced are contained within
+  // this cluster of objects.  First collect all the results produced
+  // by calls in the region.
+  _stringopts->_visited.Clear();
+  Node_List worklist;
+  Node* final_result = _end->proj_out(TypeFunc::Parms);
+  for (uint i = 0; i < _control.size(); i++) {
+    CallNode* cnode = _control.at(i)->isa_Call();
+    if (cnode != NULL) {
+      _stringopts->_visited.test_set(cnode->_idx);
+    }
+    Node* result = cnode != NULL ? cnode->proj_out(TypeFunc::Parms) : NULL;
+    if (result != NULL && result != final_result) {
+      worklist.push(result);
+    }
+  }
+
+  Node* last_result = NULL;
+  while (worklist.size() > 0) {
+    Node* result = worklist.pop();
+    if (_stringopts->_visited.test_set(result->_idx))
+      continue;
+    for (SimpleDUIterator i(result); i.has_next(); i.next()) {
+      Node *use = i.get();
+      if (ctrl_path.member(use)) {
+        // already checked this
+        continue;
+      }
+      int opc = use->Opcode();
+      if (opc == Op_CmpP || opc == Op_Node) {
+        ctrl_path.push(use);
+        continue;
+      }
+      if (opc == Op_CastPP || opc == Op_CheckCastPP) {
+        for (SimpleDUIterator j(use); j.has_next(); j.next()) {
+          worklist.push(j.get());
+        }
+        worklist.push(use->in(1));
+        ctrl_path.push(use);
+        continue;
+      }
+#ifndef PRODUCT
+      if (PrintOptimizeStringConcat) {
+        if (result != last_result) {
+          last_result = result;
+          tty->print_cr("extra uses for result:");
+          last_result->dump();
+        }
+        use->dump();
+      }
+#endif
+      fail = true;
+      break;
+    }
+  }
+
+#ifndef PRODUCT
+  if (PrintOptimizeStringConcat && !fail) {
+    ttyLocker ttyl;
+    tty->cr();
+    tty->print("fusion would succeed (%d %d) for ", null_check_count, _uncommon_traps.size());
+    _begin->jvms()->dump_spec(tty); tty->cr();
+    for (int i = 0; i < num_arguments(); i++) {
+      argument(i)->dump();
+    }
+    _control.dump();
+    tty->cr();
+  }
+#endif
+
+  return !fail;
+}
+
+Node* PhaseStringOpts::fetch_static_field(GraphKit& kit, ciField* field) {
+  const TypeKlassPtr* klass_type = TypeKlassPtr::make(field->holder());
+  Node* klass_node = __ makecon(klass_type);
+  BasicType bt = field->layout_type();
+  ciType* field_klass = field->type();
+
+  const Type *type;
+  if( bt == T_OBJECT ) {
+    if (!field->type()->is_loaded()) {
+      type = TypeInstPtr::BOTTOM;
+    } else if (field->is_constant()) {
+      // This can happen if the constant oop is non-perm.
+      ciObject* con = field->constant_value().as_object();
+      // Do not "join" in the previous type; it doesn't add value,
+      // and may yield a vacuous result if the field is of interface type.
+      type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
+      assert(type != NULL, "field singleton type must be consistent");
+    } else {
+      type = TypeOopPtr::make_from_klass(field_klass->as_klass());
+    }
+  } else {
+    type = Type::get_const_basic_type(bt);
+  }
+
+  return kit.make_load(NULL, kit.basic_plus_adr(klass_node, field->offset_in_bytes()),
+                       type, T_OBJECT,
+                       C->get_alias_index(klass_type->add_offset(field->offset_in_bytes())));
+}
+
+Node* PhaseStringOpts::int_stringSize(GraphKit& kit, Node* arg) {
+  RegionNode *final_merge = new (C, 3) RegionNode(3);
+  kit.gvn().set_type(final_merge, Type::CONTROL);
+  Node* final_size = new (C, 3) PhiNode(final_merge, TypeInt::INT);
+  kit.gvn().set_type(final_size, TypeInt::INT);
+
+  IfNode* iff = kit.create_and_map_if(kit.control(),
+                                      __ Bool(__ CmpI(arg, __ intcon(0x80000000)), BoolTest::ne),
+                                      PROB_FAIR, COUNT_UNKNOWN);
+  Node* is_min = __ IfFalse(iff);
+  final_merge->init_req(1, is_min);
+  final_size->init_req(1, __ intcon(11));
+
+  kit.set_control(__ IfTrue(iff));
+  if (kit.stopped()) {
+    final_merge->init_req(2, C->top());
+    final_size->init_req(2, C->top());
+  } else {
+
+    // int size = (i < 0) ? stringSize(-i) + 1 : stringSize(i);
+    RegionNode *r = new (C, 3) RegionNode(3);
+    kit.gvn().set_type(r, Type::CONTROL);
+    Node *phi = new (C, 3) PhiNode(r, TypeInt::INT);
+    kit.gvn().set_type(phi, TypeInt::INT);
+    Node *size = new (C, 3) PhiNode(r, TypeInt::INT);
+    kit.gvn().set_type(size, TypeInt::INT);
+    Node* chk = __ CmpI(arg, __ intcon(0));
+    Node* p = __ Bool(chk, BoolTest::lt);
+    IfNode* iff = kit.create_and_map_if(kit.control(), p, PROB_FAIR, COUNT_UNKNOWN);
+    Node* lessthan = __ IfTrue(iff);
+    Node* greaterequal = __ IfFalse(iff);
+    r->init_req(1, lessthan);
+    phi->init_req(1, __ SubI(__ intcon(0), arg));
+    size->init_req(1, __ intcon(1));
+    r->init_req(2, greaterequal);
+    phi->init_req(2, arg);
+    size->init_req(2, __ intcon(0));
+    kit.set_control(r);
+    C->record_for_igvn(r);
+    C->record_for_igvn(phi);
+    C->record_for_igvn(size);
+
+    // for (int i=0; ; i++)
+    //   if (x <= sizeTable[i])
+    //     return i+1;
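+    // Integer.sizeTable is { 9, 99, 999, ..., 999999999, Integer.MAX_VALUE },
+    // so the value returned, i+1, is the number of decimal digits.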
+    RegionNode *loop = new (C, 3) RegionNode(3);
+    loop->init_req(1, kit.control());
+    kit.gvn().set_type(loop, Type::CONTROL);
+
+    Node *index = new (C, 3) PhiNode(loop, TypeInt::INT);
+    index->init_req(1, __ intcon(0));
+    kit.gvn().set_type(index, TypeInt::INT);
+    kit.set_control(loop);
+    Node* sizeTable = fetch_static_field(kit, size_table_field);
+
+    Node* value = kit.load_array_element(NULL, sizeTable, index, TypeAryPtr::INTS);
+    C->record_for_igvn(value);
+    Node* limit = __ CmpI(phi, value);
+    Node* limitb = __ Bool(limit, BoolTest::le);
+    IfNode* iff2 = kit.create_and_map_if(kit.control(), limitb, PROB_MIN, COUNT_UNKNOWN);
+    Node* lessEqual = __ IfTrue(iff2);
+    Node* greater = __ IfFalse(iff2);
+
+    loop->init_req(2, greater);
+    index->init_req(2, __ AddI(index, __ intcon(1)));
+
+    kit.set_control(lessEqual);
+    C->record_for_igvn(loop);
+    C->record_for_igvn(index);
+
+    final_merge->init_req(2, kit.control());
+    final_size->init_req(2, __ AddI(__ AddI(index, size), __ intcon(1)));
+  }
+
+  kit.set_control(final_merge);
+  C->record_for_igvn(final_merge);
+  C->record_for_igvn(final_size);
+
+  return final_size;
+}
+
+void PhaseStringOpts::int_getChars(GraphKit& kit, Node* arg, Node* char_array, Node* start, Node* end) {
+  RegionNode *final_merge = new (C, 4) RegionNode(4);
+  kit.gvn().set_type(final_merge, Type::CONTROL);
+  Node *final_mem = PhiNode::make(final_merge, kit.memory(char_adr_idx), Type::MEMORY, TypeAryPtr::CHARS);
+  kit.gvn().set_type(final_mem, Type::MEMORY);
+
+  // need to handle Integer.MIN_VALUE specially because negating doesn't make it positive
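+  // (-(-2147483648) overflows back to -2147483648 in 32-bit two's
+  // complement, so that one value is emitted by copying a precomputed
+  // string instead)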
+  {
+    // i == MIN_VALUE
+    IfNode* iff = kit.create_and_map_if(kit.control(),
+                                        __ Bool(__ CmpI(arg, __ intcon(0x80000000)), BoolTest::ne),
+                                        PROB_FAIR, COUNT_UNKNOWN);
+
+    Node* old_mem = kit.memory(char_adr_idx);
+
+    kit.set_control(__ IfFalse(iff));
+    if (kit.stopped()) {
+      // Statically not equal to MIN_VALUE so this path is dead
+      final_merge->init_req(3, kit.control());
+    } else {
+      copy_string(kit, __ makecon(TypeInstPtr::make(C->env()->the_min_jint_string())),
+                  char_array, start);
+      final_merge->init_req(3, kit.control());
+      final_mem->init_req(3, kit.memory(char_adr_idx));
+    }
+
+    kit.set_control(__ IfTrue(iff));
+    kit.set_memory(old_mem, char_adr_idx);
+  }
+
+
+  // Simplified version of Integer.getChars
+
+  // int q, r;
+  // int charPos = index;
+  Node* charPos = end;
+
+  // char sign = 0;
+
+  Node* i = arg;
+  Node* sign = __ intcon(0);
+
+  // if (i < 0) {
+  //     sign = '-';
+  //     i = -i;
+  // }
+  {
+    IfNode* iff = kit.create_and_map_if(kit.control(),
+                                        __ Bool(__ CmpI(arg, __ intcon(0)), BoolTest::lt),
+                                        PROB_FAIR, COUNT_UNKNOWN);
+
+    RegionNode *merge = new (C, 3) RegionNode(3);
+    kit.gvn().set_type(merge, Type::CONTROL);
+    i = new (C, 3) PhiNode(merge, TypeInt::INT);
+    kit.gvn().set_type(i, TypeInt::INT);
+    sign = new (C, 3) PhiNode(merge, TypeInt::INT);
+    kit.gvn().set_type(sign, TypeInt::INT);
+
+    merge->init_req(1, __ IfTrue(iff));
+    i->init_req(1, __ SubI(__ intcon(0), arg));
+    sign->init_req(1, __ intcon('-'));
+    merge->init_req(2, __ IfFalse(iff));
+    i->init_req(2, arg);
+    sign->init_req(2, __ intcon(0));
+
+    kit.set_control(merge);
+
+    C->record_for_igvn(merge);
+    C->record_for_igvn(i);
+    C->record_for_igvn(sign);
+  }
+
+  // for (;;) {
+  //     q = i / 10;
+  //     r = i - ((q << 3) + (q << 1));  // r = i-(q*10) ...
+  //     buf [--charPos] = digits [r];
+  //     i = q;
+  //     if (i == 0) break;
+  // }
+
+  {
+    RegionNode *head = new (C, 3) RegionNode(3);
+    head->init_req(1, kit.control());
+    kit.gvn().set_type(head, Type::CONTROL);
+    Node *i_phi = new (C, 3) PhiNode(head, TypeInt::INT);
+    i_phi->init_req(1, i);
+    kit.gvn().set_type(i_phi, TypeInt::INT);
+    charPos = PhiNode::make(head, charPos);
+    kit.gvn().set_type(charPos, TypeInt::INT);
+    Node *mem = PhiNode::make(head, kit.memory(char_adr_idx), Type::MEMORY, TypeAryPtr::CHARS);
+    kit.gvn().set_type(mem, Type::MEMORY);
+    kit.set_control(head);
+    kit.set_memory(mem, char_adr_idx);
+
+    Node* q = __ DivI(NULL, i_phi, __ intcon(10));
+    Node* r = __ SubI(i_phi, __ AddI(__ LShiftI(q, __ intcon(3)),
+                                     __ LShiftI(q, __ intcon(1))));
+    Node* m1 = __ SubI(charPos, __ intcon(1));
+    Node* ch = __ AddI(r, __ intcon('0'));
+
+    Node* st = __ store_to_memory(kit.control(), kit.array_element_address(char_array, m1, T_CHAR),
+                                  ch, T_CHAR, char_adr_idx);
+
+
+    IfNode* iff = kit.create_and_map_if(head, __ Bool(__ CmpI(q, __ intcon(0)), BoolTest::ne),
+                                        PROB_FAIR, COUNT_UNKNOWN);
+    Node* ne = __ IfTrue(iff);
+    Node* eq = __ IfFalse(iff);
+
+    head->init_req(2, ne);
+    mem->init_req(2, st);
+    i_phi->init_req(2, q);
+    charPos->init_req(2, m1);
+
+    charPos = m1;
+
+    kit.set_control(eq);
+    kit.set_memory(st, char_adr_idx);
+
+    C->record_for_igvn(head);
+    C->record_for_igvn(mem);
+    C->record_for_igvn(i_phi);
+    C->record_for_igvn(charPos);
+  }
+
+  {
+    // if (sign != 0) {
+    //     buf [--charPos] = sign;
+    // }
+    IfNode* iff = kit.create_and_map_if(kit.control(),
+                                        __ Bool(__ CmpI(sign, __ intcon(0)), BoolTest::ne),
+                                        PROB_FAIR, COUNT_UNKNOWN);
+
+    final_merge->init_req(2, __ IfFalse(iff));
+    final_mem->init_req(2, kit.memory(char_adr_idx));
+
+    kit.set_control(__ IfTrue(iff));
+    if (kit.stopped()) {
+      final_merge->init_req(1, C->top());
+      final_mem->init_req(1, C->top());
+    } else {
+      Node* m1 = __ SubI(charPos, __ intcon(1));
+      Node* st = __ store_to_memory(kit.control(), kit.array_element_address(char_array, m1, T_CHAR),
+                                    sign, T_CHAR, char_adr_idx);
+
+      final_merge->init_req(1, kit.control());
+      final_mem->init_req(1, st);
+    }
+
+    kit.set_control(final_merge);
+    kit.set_memory(final_mem, char_adr_idx);
+
+    C->record_for_igvn(final_merge);
+    C->record_for_igvn(final_mem);
+  }
+}
+
+
+Node* PhaseStringOpts::copy_string(GraphKit& kit, Node* str, Node* char_array, Node* start) {
+  Node* string = str;
+  Node* offset = kit.make_load(NULL,
+                               kit.basic_plus_adr(string, string, java_lang_String::offset_offset_in_bytes()),
+                               TypeInt::INT, T_INT, offset_field_idx);
+  Node* count = kit.make_load(NULL,
+                              kit.basic_plus_adr(string, string, java_lang_String::count_offset_in_bytes()),
+                              TypeInt::INT, T_INT, count_field_idx);
+  const TypeAryPtr*  value_type = TypeAryPtr::make(TypePtr::NotNull,
+                                                   TypeAry::make(TypeInt::CHAR,TypeInt::POS),
+                                                   ciTypeArrayKlass::make(T_CHAR), true, 0);
+  Node* value = kit.make_load(NULL,
+                              kit.basic_plus_adr(string, string, java_lang_String::value_offset_in_bytes()),
+                              value_type, T_OBJECT, value_field_idx);
+
+  // copy the contents
+  if (offset->is_Con() && count->is_Con() && value->is_Con() && count->get_int() < unroll_string_copy_length) {
+    // For small constant strings just emit individual stores.
+    // A length of 6 seems like a good space/speed tradeoff.
+    int c = count->get_int();
+    int o = offset->get_int();
+    const TypeOopPtr* t = kit.gvn().type(value)->isa_oopptr();
+    ciTypeArray* value_array = t->const_oop()->as_type_array();
+    for (int e = 0; e < c; e++) {
+      __ store_to_memory(kit.control(), kit.array_element_address(char_array, start, T_CHAR),
+                         __ intcon(value_array->char_at(o + e)), T_CHAR, char_adr_idx);
+      start = __ AddI(start, __ intcon(1));
+    }
+  } else {
+    Node* src_ptr = kit.array_element_address(value, offset, T_CHAR);
+    Node* dst_ptr = kit.array_element_address(char_array, start, T_CHAR);
+    Node* c = count;
+    Node* extra = NULL;
+#ifdef _LP64
+    c = __ ConvI2L(c);
+    extra = C->top();
+#endif
+    Node* call = kit.make_runtime_call(GraphKit::RC_LEAF|GraphKit::RC_NO_FP,
+                                       OptoRuntime::fast_arraycopy_Type(),
+                                       CAST_FROM_FN_PTR(address, StubRoutines::jshort_disjoint_arraycopy()),
+                                       "jshort_disjoint_arraycopy", TypeAryPtr::CHARS,
+                                       src_ptr, dst_ptr, c, extra);
+    start = __ AddI(start, count);
+  }
+  return start;
+}
+
+
+void PhaseStringOpts::replace_string_concat(StringConcat* sc) {
+  // Log a little info about the transformation
+  sc->maybe_log_transform();
+
+  // pull the JVMState of the allocation into a SafePointNode to serve
+  // as a shim for the insertion of the new code.
+  JVMState* jvms     = sc->begin()->jvms()->clone_shallow(C);
+  uint size = sc->begin()->req();
+  SafePointNode* map = new (C, size) SafePointNode(size, jvms);
+
+  // copy the control and memory state from the final call into our
+  // new starting state.  This allows any preceding tests to feed
+  // into the new section of code.
+  for (uint i1 = 0; i1 < TypeFunc::Parms; i1++) {
+    map->init_req(i1, sc->end()->in(i1));
+  }
+  // blow away old allocation arguments
+  for (uint i1 = TypeFunc::Parms; i1 < jvms->debug_start(); i1++) {
+    map->init_req(i1, C->top());
+  }
+  // Copy the rest of the inputs for the JVMState
+  for (uint i1 = jvms->debug_start(); i1 < sc->begin()->req(); i1++) {
+    map->init_req(i1, sc->begin()->in(i1));
+  }
+  // Make sure the memory state is a MergeMem for parsing.
+  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
+    map->set_req(TypeFunc::Memory, MergeMemNode::make(C, map->in(TypeFunc::Memory)));
+  }
+
+  jvms->set_map(map);
+  map->ensure_stack(jvms, jvms->method()->max_stack());
+
+
+  // disconnect all the old StringBuilder calls from the graph
+  sc->eliminate_unneeded_control();
+
+  // At this point all the old work has been completely removed from
+  // the graph and the saved JVMState exists at the point where the
+  // final toString call used to be.
+  GraphKit kit(jvms);
+
+  // There may be uncommon traps which are still using the
+  // intermediate states and these need to be rewritten to point at
+  // the JVMState at the beginning of the transformation.
+  sc->convert_uncommon_traps(kit, jvms);
+
+  // Now insert the logic to compute the size of the string followed
+  // by all the logic to construct array and resulting string.
+
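+  // In outline, the code generated below is equivalent to:
+  //   int len = size(arg0) + ... + size(argN);   // with overflow checks
+  //   char[] buf = new char[len];
+  //   ... copy each argument's characters into buf ...
+  //   String result = new String();              // offset 0, count len, value buf
+  // where the String fields are stored directly rather than via a constructor.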
+  Node* null_string = __ makecon(TypeInstPtr::make(C->env()->the_null_string()));
+
+  // Create a region for the overflow checks to merge into.
+  int args = MAX2(sc->num_arguments(), 1);
+  RegionNode* overflow = new (C, args) RegionNode(args);
+  kit.gvn().set_type(overflow, Type::CONTROL);
+
+  // Create a hook node to hold onto the individual sizes since they
+  // are needed for the copying phase.
+  Node* string_sizes = new (C, args) Node(args);
+
+  Node* length = __ intcon(0);
+  for (int argi = 0; argi < sc->num_arguments(); argi++) {
+    Node* arg = sc->argument(argi);
+    switch (sc->mode(argi)) {
+      case StringConcat::IntMode: {
+        Node* string_size = int_stringSize(kit, arg);
+
+        // accumulate total
+        length = __ AddI(length, string_size);
+
+        // Cache this value for use by int_getChars below
+        string_sizes->init_req(argi, string_size);
+        break;
+      }
+      case StringConcat::StringMode: {
+        const Type* type = kit.gvn().type(arg);
+        if (type == TypePtr::NULL_PTR) {
+          // replace the argument with the null checked version
+          arg = null_string;
+          sc->set_argument(argi, arg);
+        } else if (!type->higher_equal(TypeInstPtr::NOTNULL)) {
+          // s = s != null ? s : "null";
+          // length = length + (s.count - s.offset);
+          RegionNode *r = new (C, 3) RegionNode(3);
+          kit.gvn().set_type(r, Type::CONTROL);
+          Node *phi = new (C, 3) PhiNode(r, type);
+          kit.gvn().set_type(phi, phi->bottom_type());
+          Node* p = __ Bool(__ CmpP(arg, kit.null()), BoolTest::ne);
+          IfNode* iff = kit.create_and_map_if(kit.control(), p, PROB_MIN, COUNT_UNKNOWN);
+          Node* notnull = __ IfTrue(iff);
+          Node* isnull =  __ IfFalse(iff);
+          kit.set_control(notnull); // set control for the cast_not_null
+          r->init_req(1, notnull);
+          phi->init_req(1, kit.cast_not_null(arg, false));
+          r->init_req(2, isnull);
+          phi->init_req(2, null_string);
+          kit.set_control(r);
+          C->record_for_igvn(r);
+          C->record_for_igvn(phi);
+          // replace the argument with the null checked version
+          arg = phi;
+          sc->set_argument(argi, arg);
+        }
+        //         Node* offset = kit.make_load(NULL, kit.basic_plus_adr(arg, arg, offset_offset),
+        //                                      TypeInt::INT, T_INT, offset_field_idx);
+        Node* count = kit.make_load(NULL, kit.basic_plus_adr(arg, arg, java_lang_String::count_offset_in_bytes()),
+                                    TypeInt::INT, T_INT, count_field_idx);
+        length = __ AddI(length, count);
+        string_sizes->init_req(argi, NULL);
+        break;
+      }
+      case StringConcat::CharMode: {
+        // one character only
+        length = __ AddI(length, __ intcon(1));
+        break;
+      }
+      default:
+        ShouldNotReachHere();
+    }
+    if (argi > 0) {
+      // Check that the sum hasn't overflowed
+      IfNode* iff = kit.create_and_map_if(kit.control(),
+                                          __ Bool(__ CmpI(length, __ intcon(0)), BoolTest::lt),
+                                          PROB_MIN, COUNT_UNKNOWN);
+      kit.set_control(__ IfFalse(iff));
+      overflow->set_req(argi, __ IfTrue(iff));
+    }
+  }
+
+  {
+    // Hook
+    PreserveJVMState pjvms(&kit);
+    kit.set_control(overflow);
+    kit.uncommon_trap(Deoptimization::Reason_intrinsic,
+                      Deoptimization::Action_make_not_entrant);
+  }
+
+  // length now contains the number of characters needed for the
+  // char[] so create a new AllocateArray for the char[]
+  Node* char_array = NULL;
+  {
+    PreserveReexecuteState preexecs(&kit);
+    // The original jvms is for an allocation of either a String or
+    // StringBuffer so no stack adjustment is necessary for proper
+    // reexecution.  If we deoptimize in the slow path the bytecode
+    // will be reexecuted and the char[] allocation will be thrown away.
+    kit.jvms()->set_should_reexecute(true);
+    char_array = kit.new_array(__ makecon(TypeKlassPtr::make(ciTypeArrayKlass::make(T_CHAR))),
+                               length, 1);
+  }
+
+  // Mark the allocation so that zeroing is skipped since the code
+  // below will overwrite the entire array
+  AllocateArrayNode* char_alloc = AllocateArrayNode::Ideal_array_allocation(char_array, _gvn);
+  char_alloc->maybe_set_complete(_gvn);
+
+  // Now copy the string representations into the final char[]
+  Node* start = __ intcon(0);
+  for (int argi = 0; argi < sc->num_arguments(); argi++) {
+    Node* arg = sc->argument(argi);
+    switch (sc->mode(argi)) {
+      case StringConcat::IntMode: {
+        Node* end = __ AddI(start, string_sizes->in(argi));
+        // getChars works backwards so pass the ending point as well as the start
+        int_getChars(kit, arg, char_array, start, end);
+        start = end;
+        break;
+      }
+      case StringConcat::StringMode: {
+        start = copy_string(kit, arg, char_array, start);
+        break;
+      }
+      case StringConcat::CharMode: {
+        __ store_to_memory(kit.control(), kit.array_element_address(char_array, start, T_CHAR),
+                           arg, T_CHAR, char_adr_idx);
+        start = __ AddI(start, __ intcon(1));
+        break;
+      }
+      default:
+        ShouldNotReachHere();
+    }
+  }
+
+  // If we're not reusing an existing String allocation then allocate one here.
+  Node* result = sc->string_alloc();
+  if (result == NULL) {
+    PreserveReexecuteState preexecs(&kit);
+    // The original jvms is for an allocation of either a String or
+    // StringBuffer so no stack adjustment is necessary for proper
+    // reexecution.
+    kit.jvms()->set_should_reexecute(true);
+    result = kit.new_instance(__ makecon(TypeKlassPtr::make(C->env()->String_klass())));
+  }
+
+  // Initialize the string
+  kit.store_to_memory(kit.control(), kit.basic_plus_adr(result, java_lang_String::offset_offset_in_bytes()),
+                      __ intcon(0), T_INT, offset_field_idx);
+  kit.store_to_memory(kit.control(), kit.basic_plus_adr(result, java_lang_String::count_offset_in_bytes()),
+                      length, T_INT, count_field_idx);
+  kit.store_to_memory(kit.control(), kit.basic_plus_adr(result, java_lang_String::value_offset_in_bytes()),
+                      char_array, T_OBJECT, value_field_idx);
+
+  // hook up the outgoing control and result
+  kit.replace_call(sc->end(), result);
+
+  // Unhook any hook nodes
+  string_sizes->disconnect_inputs(NULL);
+  sc->cleanup();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/opto/stringopts.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+class StringConcat;
+
+class PhaseStringOpts : public Phase {
+  friend class StringConcat;
+
+ private:
+  PhaseGVN* _gvn;
+
+  // List of dead nodes to clean up aggressively at the end
+  Unique_Node_List dead_worklist;
+
+  // Memory slices needed for code gen
+  int char_adr_idx;
+  int value_field_idx;
+  int count_field_idx;
+  int offset_field_idx;
+
+  // Integer.sizeTable - used for int to String conversion
+  ciField* size_table_field;
+
+  // A set for use by various stages
+  VectorSet _visited;
+
+  // Collect a list of all SB.toString calls
+  Node_List collect_toString_calls();
+
+  // Examine the use of the SB alloc to see if it can be replaced with
+  // a single string construction.
+  StringConcat* build_candidate(CallStaticJavaNode* call);
+
+  // Replace all the SB calls in concat with an optimized String allocation
+  void replace_string_concat(StringConcat* concat);
+
+  // Load the value of a static field, performing any constant folding.
+  Node* fetch_static_field(GraphKit& kit, ciField* field);
+
+  // Compute the number of characters required to represent the int value
+  Node* int_stringSize(GraphKit& kit, Node* value);
+
+  // Copy the characters representing value into char_array starting at start
+  void int_getChars(GraphKit& kit, Node* value, Node* char_array, Node* start, Node* end);
+
+  // Copy the contents of the String str into char_array starting at index start.
+  Node* copy_string(GraphKit& kit, Node* str, Node* char_array, Node* start);
+
+  // Clean up any leftover nodes
+  void record_dead_node(Node* node);
+  void remove_dead_nodes();
+
+  PhaseGVN* gvn() { return _gvn; }
+
+  enum {
+    // max length of constant string copy unrolling in copy_string
+    unroll_string_copy_length = 6
+  };
+
+ public:
+  PhaseStringOpts(PhaseGVN* gvn, Unique_Node_List* worklist);
+};
--- a/src/share/vm/opto/subnode.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/subnode.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1244,8 +1244,7 @@
   if( t1 == Type::TOP ) return Type::TOP;
   if( t1->base() != Type::DoubleCon ) return Type::DOUBLE;
   double d = t1->getd();
-  if( d < 0.0 ) return Type::DOUBLE;
-  return TypeD::make( SharedRuntime::dcos( d ) );
+  return TypeD::make( StubRoutines::intrinsic_cos( d ) );
 }
 
 //=============================================================================
@@ -1256,8 +1255,7 @@
   if( t1 == Type::TOP ) return Type::TOP;
   if( t1->base() != Type::DoubleCon ) return Type::DOUBLE;
   double d = t1->getd();
-  if( d < 0.0 ) return Type::DOUBLE;
-  return TypeD::make( SharedRuntime::dsin( d ) );
+  return TypeD::make( StubRoutines::intrinsic_sin( d ) );
 }
 
 //=============================================================================
@@ -1268,8 +1266,7 @@
   if( t1 == Type::TOP ) return Type::TOP;
   if( t1->base() != Type::DoubleCon ) return Type::DOUBLE;
   double d = t1->getd();
-  if( d < 0.0 ) return Type::DOUBLE;
-  return TypeD::make( SharedRuntime::dtan( d ) );
+  return TypeD::make( StubRoutines::intrinsic_tan( d ) );
 }
 
 //=============================================================================
@@ -1280,8 +1277,7 @@
   if( t1 == Type::TOP ) return Type::TOP;
   if( t1->base() != Type::DoubleCon ) return Type::DOUBLE;
   double d = t1->getd();
-  if( d < 0.0 ) return Type::DOUBLE;
-  return TypeD::make( SharedRuntime::dlog( d ) );
+  return TypeD::make( StubRoutines::intrinsic_log( d ) );
 }
 
 //=============================================================================
@@ -1292,8 +1288,7 @@
   if( t1 == Type::TOP ) return Type::TOP;
   if( t1->base() != Type::DoubleCon ) return Type::DOUBLE;
   double d = t1->getd();
-  if( d < 0.0 ) return Type::DOUBLE;
-  return TypeD::make( SharedRuntime::dlog10( d ) );
+  return TypeD::make( StubRoutines::intrinsic_log10( d ) );
 }
 
 //=============================================================================
@@ -1304,8 +1299,7 @@
   if( t1 == Type::TOP ) return Type::TOP;
   if( t1->base() != Type::DoubleCon ) return Type::DOUBLE;
   double d = t1->getd();
-  if( d < 0.0 ) return Type::DOUBLE;
-  return TypeD::make( SharedRuntime::dexp( d ) );
+  return TypeD::make( StubRoutines::intrinsic_exp( d ) );
 }
 
 
@@ -1323,5 +1317,5 @@
   double d2 = t2->getd();
   if( d1 < 0.0 ) return Type::DOUBLE;
   if( d2 < 0.0 ) return Type::DOUBLE;
-  return TypeD::make( SharedRuntime::dpow( d1, d2 ) );
+  return TypeD::make( StubRoutines::intrinsic_pow( d1, d2 ) );
 }
--- a/src/share/vm/opto/superword.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/superword.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1921,6 +1921,11 @@
   }
   // Match AddP(base, AddP(ptr, k*iv [+ invariant]), constant)
   Node* base = adr->in(AddPNode::Base);
+  // Unsafe references cannot be aligned appropriately without runtime checking
+  if (base == NULL || base->bottom_type() == Type::TOP) {
+    assert(!valid(), "unsafe access");
+    return;
+  }
   for (int i = 0; i < 3; i++) {
     if (!scaled_iv_plus_offset(adr->in(AddPNode::Offset))) {
       assert(!valid(), "too complex");
--- a/src/share/vm/opto/type.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/type.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -2431,7 +2431,7 @@
 //------------------------------make_from_constant-----------------------------
 // Make a java pointer from an oop constant
 const TypeOopPtr* TypeOopPtr::make_from_constant(ciObject* o, bool require_constant) {
-  if (o->is_method_data() || o->is_method()) {
+  if (o->is_method_data() || o->is_method() || o->is_cpcache()) {
     // Treat much like a typeArray of bytes, like below, but fake the type...
     const Type* etype = (Type*)get_const_basic_type(T_BYTE);
     const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
@@ -3969,7 +3969,7 @@
   const TypeFunc* tf = C->last_tf(method); // check cache
   if (tf != NULL)  return tf;  // The hit rate here is almost 50%.
   const TypeTuple *domain;
-  if (method->flags().is_static()) {
+  if (method->is_static()) {
     domain = TypeTuple::make_domain(NULL, method->signature());
   } else {
     domain = TypeTuple::make_domain(method->holder(), method->signature());
--- a/src/share/vm/opto/type.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/opto/type.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -847,9 +847,6 @@
   // Constant pointer to array
   static const TypeAryPtr *make( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot);
 
-  // Convenience
-  static const TypeAryPtr *make(ciObject* o);
-
   // Return a 'ptr' version of this type
   virtual const Type *cast_to_ptr_type(PTR ptr) const;
 
--- a/src/share/vm/prims/jni.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/prims/jni.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -396,11 +396,11 @@
   oop mirror     = NULL;
   int slot       = 0;
 
-  if (reflected->klass() == SystemDictionary::reflect_constructor_klass()) {
+  if (reflected->klass() == SystemDictionary::reflect_Constructor_klass()) {
     mirror = java_lang_reflect_Constructor::clazz(reflected);
     slot   = java_lang_reflect_Constructor::slot(reflected);
   } else {
-    assert(reflected->klass() == SystemDictionary::reflect_method_klass(), "wrong type");
+    assert(reflected->klass() == SystemDictionary::reflect_Method_klass(), "wrong type");
     mirror = java_lang_reflect_Method::clazz(reflected);
     slot   = java_lang_reflect_Method::slot(reflected);
   }
@@ -496,7 +496,7 @@
   klassOop super = Klass::cast(k)->java_super();
   // super2 is the value computed by the compiler's getSuperClass intrinsic:
   debug_only(klassOop super2 = ( Klass::cast(k)->oop_is_javaArray()
-                                 ? SystemDictionary::object_klass()
+                                 ? SystemDictionary::Object_klass()
                                  : Klass::cast(k)->super() ) );
   assert(super == super2,
          "java_super computation depends on interface, array, other super");
@@ -584,7 +584,7 @@
   if (thread->has_pending_exception()) {
     Handle ex(thread, thread->pending_exception());
     thread->clear_pending_exception();
-    if (ex->is_a(SystemDictionary::threaddeath_klass())) {
+    if (ex->is_a(SystemDictionary::ThreadDeath_klass())) {
       // Don't print anything if we are being killed.
     } else {
       jio_fprintf(defaultStream::error_stream(), "Exception ");
@@ -593,12 +593,12 @@
         jio_fprintf(defaultStream::error_stream(),
         "in thread \"%s\" ", thread->get_thread_name());
       }
-      if (ex->is_a(SystemDictionary::throwable_klass())) {
+      if (ex->is_a(SystemDictionary::Throwable_klass())) {
         JavaValue result(T_VOID);
         JavaCalls::call_virtual(&result,
                                 ex,
                                 KlassHandle(THREAD,
-                                  SystemDictionary::throwable_klass()),
+                                  SystemDictionary::Throwable_klass()),
                                 vmSymbolHandles::printStackTrace_name(),
                                 vmSymbolHandles::void_method_signature(),
                                 THREAD);
@@ -3231,6 +3231,21 @@
   jint result = JNI_ERR;
   DT_RETURN_MARK(CreateJavaVM, jint, (const jint&)result);
 
+  // We're about to use Atomic::xchg for synchronization.  Some Zero
+  // platforms use the GCC builtin __sync_lock_test_and_set for this,
+  // but __sync_lock_test_and_set is not guaranteed to do what we want
+  // on all architectures, so we check that it works before relying on it.
+#if defined(ZERO) && defined(ASSERT)
+  {
+    jint a = 0xcafebabe;
+    jint b = Atomic::xchg(0xdeadbeef, &a);
+    void *c = &a;
+    void *d = Atomic::xchg_ptr(&b, &c);
+    assert(a == (jint) 0xdeadbeef && b == (jint) 0xcafebabe, "Atomic::xchg() works");
+    assert(c == &b && d == &a, "Atomic::xchg_ptr() works");
+  }
+#endif // ZERO && ASSERT
+
   // At the moment it's only possible to have one Java VM,
   // since some of the runtime state is in global variables.
 
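The rationale for this self-test: GCC documents __sync_lock_test_and_set as an acquire barrier only, and notes that some targets support storing only a restricted set of values (possibly just the constant 1), so a Zero port built on the builtin may not actually provide full exchange semantics. A hedged sketch of how such a port might wrap the builtin, not the actual per-platform code:

    inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
      // Acquire-only on most targets, and some targets cannot store
      // arbitrary values at all; that is exactly what the assert above detects.
      jint old = __sync_lock_test_and_set(dest, exchange_value);
      __sync_synchronize();  // upgrade to a full barrier
      return old;
    }
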
--- a/src/share/vm/prims/jniCheck.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/prims/jniCheck.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -341,7 +341,7 @@
     ReportJNIFatalError(thr, fatal_received_null_class);
   }
 
-  if (mirror->klass() != SystemDictionary::class_klass()) {
+  if (mirror->klass() != SystemDictionary::Class_klass()) {
     ReportJNIFatalError(thr, fatal_class_not_a_class);
   }
 
@@ -358,7 +358,7 @@
   assert(klass != NULL, "klass argument must have a value");
 
   if (!Klass::cast(klass)->oop_is_instance() ||
-      !instanceKlass::cast(klass)->is_subclass_of(SystemDictionary::throwable_klass())) {
+      !instanceKlass::cast(klass)->is_subclass_of(SystemDictionary::Throwable_klass())) {
     ReportJNIFatalError(thr, fatal_class_not_a_throwable_class);
   }
 }
--- a/src/share/vm/prims/jvm.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/prims/jvm.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -80,7 +80,7 @@
 
     while (!vfst.at_end()) {
       methodOop m = vfst.method();
-      if (!vfst.method()->method_holder()->klass_part()->is_subclass_of(SystemDictionary::classloader_klass())&&
+      if (!vfst.method()->method_holder()->klass_part()->is_subclass_of(SystemDictionary::ClassLoader_klass())&&
           !vfst.method()->method_holder()->klass_part()->is_subclass_of(access_controller_klass) &&
           !vfst.method()->method_holder()->klass_part()->is_subclass_of(privileged_action_klass)) {
         break;
@@ -257,7 +257,7 @@
   Handle value_str  = java_lang_String::create_from_platform_dependent_str((value != NULL ? value : ""), CHECK);
   JavaCalls::call_virtual(&r,
                           props,
-                          KlassHandle(THREAD, SystemDictionary::properties_klass()),
+                          KlassHandle(THREAD, SystemDictionary::Properties_klass()),
                           vmSymbolHandles::put_name(),
                           vmSymbolHandles::object_object_object_signature(),
                           key_str,
@@ -495,7 +495,7 @@
     guarantee(klass->is_cloneable(), "all arrays are cloneable");
   } else {
     guarantee(obj->is_instance(), "should be instanceOop");
-    bool cloneable = klass->is_subtype_of(SystemDictionary::cloneable_klass());
+    bool cloneable = klass->is_subtype_of(SystemDictionary::Cloneable_klass());
     guarantee(cloneable == klass->is_cloneable(), "incorrect cloneable flag");
   }
 #endif
@@ -908,7 +908,7 @@
   // Special handling for primitive objects
   if (java_lang_Class::is_primitive(mirror)) {
    // Primitive objects do not have any interfaces
-    objArrayOop r = oopFactory::new_objArray(SystemDictionary::class_klass(), 0, CHECK_NULL);
+    objArrayOop r = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_NULL);
     return (jobjectArray) JNIHandles::make_local(env, r);
   }
 
@@ -923,7 +923,7 @@
   }
 
   // Allocate result array
-  objArrayOop r = oopFactory::new_objArray(SystemDictionary::class_klass(), size, CHECK_NULL);
+  objArrayOop r = oopFactory::new_objArray(SystemDictionary::Class_klass(), size, CHECK_NULL);
   objArrayHandle result (THREAD, r);
   // Fill in result
   if (klass->oop_is_instance()) {
@@ -934,8 +934,8 @@
     }
   } else {
     // All arrays implement java.lang.Cloneable and java.io.Serializable
-    result->obj_at_put(0, Klass::cast(SystemDictionary::cloneable_klass())->java_mirror());
-    result->obj_at_put(1, Klass::cast(SystemDictionary::serializable_klass())->java_mirror());
+    result->obj_at_put(0, Klass::cast(SystemDictionary::Cloneable_klass())->java_mirror());
+    result->obj_at_put(1, Klass::cast(SystemDictionary::Serializable_klass())->java_mirror());
   }
   return (jobjectArray) JNIHandles::make_local(env, result());
 JVM_END
@@ -1098,8 +1098,8 @@
     pending_exception = Handle(THREAD, PENDING_EXCEPTION);
     CLEAR_PENDING_EXCEPTION;
 
-    if ( pending_exception->is_a(SystemDictionary::exception_klass()) &&
-        !pending_exception->is_a(SystemDictionary::runtime_exception_klass())) {
+    if ( pending_exception->is_a(SystemDictionary::Exception_klass()) &&
+        !pending_exception->is_a(SystemDictionary::RuntimeException_klass())) {
       // Throw a java.security.PrivilegedActionException(Exception e) exception
       JavaCallArguments args(pending_exception);
       THROW_ARG_0(vmSymbolHandles::java_security_PrivilegedActionException(),
@@ -1190,7 +1190,7 @@
 
   // the resource area must be registered in case of a gc
   RegisterArrayForGC ragc(thread, local_array);
-  objArrayOop context = oopFactory::new_objArray(SystemDictionary::protectionDomain_klass(),
+  objArrayOop context = oopFactory::new_objArray(SystemDictionary::ProtectionDomain_klass(),
                                                  local_array->length(), CHECK_NULL);
   objArrayHandle h_context(thread, context);
   for (int index = 0; index < local_array->length(); index++) {
@@ -1251,7 +1251,7 @@
 
   if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass)) ||
       ! Klass::cast(java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(ofClass)))->oop_is_instance()) {
-    oop result = oopFactory::new_objArray(SystemDictionary::class_klass(), 0, CHECK_NULL);
+    oop result = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_NULL);
     return (jobjectArray)JNIHandles::make_local(env, result);
   }
 
@@ -1259,7 +1259,7 @@
 
   if (k->inner_classes()->length() == 0) {
     // Neither an inner nor outer class
-    oop result = oopFactory::new_objArray(SystemDictionary::class_klass(), 0, CHECK_NULL);
+    oop result = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_NULL);
     return (jobjectArray)JNIHandles::make_local(env, result);
   }
 
@@ -1269,7 +1269,7 @@
   int length = icls->length();
 
   // Allocate temp. result array
-  objArrayOop r = oopFactory::new_objArray(SystemDictionary::class_klass(), length/4, CHECK_NULL);
+  objArrayOop r = oopFactory::new_objArray(SystemDictionary::Class_klass(), length/4, CHECK_NULL);
   objArrayHandle result (THREAD, r);
   int members = 0;
 
@@ -1299,7 +1299,7 @@
 
   if (members != length) {
     // Return array of right length
-    objArrayOop res = oopFactory::new_objArray(SystemDictionary::class_klass(), members, CHECK_NULL);
+    objArrayOop res = oopFactory::new_objArray(SystemDictionary::Class_klass(), members, CHECK_NULL);
     for(int i = 0; i < members; i++) {
       res->obj_at_put(i, result->obj_at(i));
     }
@@ -1318,19 +1318,20 @@
     return NULL;
   }
 
-  symbolOop simple_name = NULL;
+  bool inner_is_member = false;
   klassOop outer_klass
     = instanceKlass::cast(java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(ofClass))
-                          )->compute_enclosing_class(simple_name, CHECK_NULL);
+                          )->compute_enclosing_class(&inner_is_member, CHECK_NULL);
   if (outer_klass == NULL)  return NULL;  // already a top-level class
-  if (simple_name == NULL)  return NULL;  // an anonymous class (inside a method)
+  if (!inner_is_member)  return NULL;     // an anonymous class (inside a method)
   return (jclass) JNIHandles::make_local(env, Klass::cast(outer_klass)->java_mirror());
 }
 JVM_END
 
 // should be in instanceKlass.cpp, but is here for historical reasons
 klassOop instanceKlass::compute_enclosing_class_impl(instanceKlassHandle k,
-                                                     symbolOop& simple_name_result, TRAPS) {
+                                                     bool* inner_is_member,
+                                                     TRAPS) {
   Thread* thread = THREAD;
   const int inner_class_info_index = inner_class_inner_class_info_offset;
   const int outer_class_info_index = inner_class_outer_class_info_offset;
@@ -1347,8 +1348,7 @@
   bool found = false;
   klassOop ok;
   instanceKlassHandle outer_klass;
-  bool inner_is_member = false;
-  int simple_name_index = 0;
+  *inner_is_member = false;
 
   // Find inner_klass attribute
   for (int i = 0; i < i_length && !found; i += inner_class_next_offset) {
@@ -1364,8 +1364,7 @@
         if (found && ooff != 0) {
           ok = i_cp->klass_at(ooff, CHECK_NULL);
           outer_klass = instanceKlassHandle(thread, ok);
-          simple_name_index = noff;
-          inner_is_member = true;
+          *inner_is_member = true;
         }
       }
     }
@@ -1377,7 +1376,7 @@
     if (encl_method_class_idx != 0) {
       ok = i_cp->klass_at(encl_method_class_idx, CHECK_NULL);
       outer_klass = instanceKlassHandle(thread, ok);
-      inner_is_member = false;
+      *inner_is_member = false;
     }
   }
 
@@ -1387,9 +1386,7 @@
   // Throws an exception if outer klass has not declared k as an inner klass
   // We need evidence that each klass knows about the other, or else
   // the system could allow a spoof of an inner class to gain access rights.
-  Reflection::check_for_inner_class(outer_klass, k, inner_is_member, CHECK_NULL);
-
-  simple_name_result = (inner_is_member ? i_cp->symbol_at(simple_name_index) : symbolOop(NULL));
+  Reflection::check_for_inner_class(outer_klass, k, *inner_is_member, CHECK_NULL);
   return outer_klass();
 }
 
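The refactoring above replaces the symbolOop out-parameter with a bool: the only thing the caller ever did with simple_name was compare it against NULL to tell member classes apart from local and anonymous ones, so the simple_name_index bookkeeping was dead weight. The new calling convention, condensed from the hunk above:

    bool inner_is_member = false;
    klassOop outer = instanceKlass::cast(k)->compute_enclosing_class(&inner_is_member, CHECK_NULL);
    if (outer == NULL)     return NULL;  // already a top-level class
    if (!inner_is_member)  return NULL;  // a local or anonymous class
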
@@ -1473,11 +1470,11 @@
   oop mirror    = NULL;
   int slot      = 0;
 
-  if (reflected->klass() == SystemDictionary::reflect_constructor_klass()) {
+  if (reflected->klass() == SystemDictionary::reflect_Constructor_klass()) {
     mirror = java_lang_reflect_Constructor::clazz(reflected);
     slot   = java_lang_reflect_Constructor::slot(reflected);
   } else {
-    assert(reflected->klass() == SystemDictionary::reflect_method_klass(),
+    assert(reflected->klass() == SystemDictionary::reflect_Method_klass(),
            "wrong type");
     mirror = java_lang_reflect_Method::clazz(reflected);
     slot   = java_lang_reflect_Method::slot(reflected);
@@ -1533,7 +1530,7 @@
   if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass)) ||
       Klass::cast(java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(ofClass)))->oop_is_javaArray()) {
     // Return empty array
-    oop res = oopFactory::new_objArray(SystemDictionary::reflect_field_klass(), 0, CHECK_NULL);
+    oop res = oopFactory::new_objArray(SystemDictionary::reflect_Field_klass(), 0, CHECK_NULL);
     return (jobjectArray) JNIHandles::make_local(env, res);
   }
 
@@ -1561,13 +1558,13 @@
   } else {
     num_fields = fields_len / instanceKlass::next_offset;
 
-    if (k() == SystemDictionary::throwable_klass()) {
+    if (k() == SystemDictionary::Throwable_klass()) {
       num_fields--;
       skip_backtrace = true;
     }
   }
 
-  objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_field_klass(), num_fields, CHECK_NULL);
+  objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_Field_klass(), num_fields, CHECK_NULL);
   objArrayHandle result (THREAD, r);
 
   int out_idx = 0;
@@ -1601,7 +1598,7 @@
   if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass))
       || Klass::cast(java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(ofClass)))->oop_is_javaArray()) {
     // Return empty array
-    oop res = oopFactory::new_objArray(SystemDictionary::reflect_method_klass(), 0, CHECK_NULL);
+    oop res = oopFactory::new_objArray(SystemDictionary::reflect_Method_klass(), 0, CHECK_NULL);
     return (jobjectArray) JNIHandles::make_local(env, res);
   }
 
@@ -1625,7 +1622,7 @@
   }
 
   // Allocate result
-  objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_method_klass(), num_methods, CHECK_NULL);
+  objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_Method_klass(), num_methods, CHECK_NULL);
   objArrayHandle result (THREAD, r);
 
   int out_idx = 0;
@@ -1653,7 +1650,7 @@
   if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass))
       || Klass::cast(java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(ofClass)))->oop_is_javaArray()) {
     // Return empty array
-    oop res = oopFactory::new_objArray(SystemDictionary::reflect_constructor_klass(), 0 , CHECK_NULL);
+    oop res = oopFactory::new_objArray(SystemDictionary::reflect_Constructor_klass(), 0 , CHECK_NULL);
     return (jobjectArray) JNIHandles::make_local(env, res);
   }
 
@@ -1677,7 +1674,7 @@
   }
 
   // Allocate result
-  objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_constructor_klass(), num_constructors, CHECK_NULL);
+  objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_Constructor_klass(), num_constructors, CHECK_NULL);
   objArrayHandle result(THREAD, r);
 
   int out_idx = 0;
@@ -1890,7 +1887,7 @@
   symbolHandle klass_name (THREAD, cp->klass_name_at(klass_ref));
   symbolHandle member_name(THREAD, cp->uncached_name_ref_at(index));
   symbolHandle member_sig (THREAD, cp->uncached_signature_ref_at(index));
-  objArrayOop  dest_o = oopFactory::new_objArray(SystemDictionary::string_klass(), 3, CHECK_NULL);
+  objArrayOop  dest_o = oopFactory::new_objArray(SystemDictionary::String_klass(), 3, CHECK_NULL);
   objArrayHandle dest(THREAD, dest_o);
   Handle str = java_lang_String::create_from_symbol(klass_name, CHECK_NULL);
   dest->obj_at_put(0, str());
@@ -2257,10 +2254,8 @@
   switch (cp->tag_at(cp_index).value()) {
     case JVM_CONSTANT_InterfaceMethodref:
     case JVM_CONSTANT_Methodref:
+    case JVM_CONSTANT_NameAndType:  // for invokedynamic
       return cp->uncached_name_ref_at(cp_index)->as_utf8();
-    case JVM_CONSTANT_NameAndType:
-      // for invokedynamic
-      return cp->nt_name_ref_at(cp_index)->as_utf8();
     default:
       fatal("JVM_GetCPMethodNameUTF: illegal constant");
   }
@@ -2277,10 +2272,8 @@
   switch (cp->tag_at(cp_index).value()) {
     case JVM_CONSTANT_InterfaceMethodref:
     case JVM_CONSTANT_Methodref:
+    case JVM_CONSTANT_NameAndType:  // for invokedynamic
       return cp->uncached_signature_ref_at(cp_index)->as_utf8();
-    case JVM_CONSTANT_NameAndType:
-      // for invokedynamic
-      return cp->nt_signature_ref_at(cp_index)->as_utf8();
     default:
       fatal("JVM_GetCPMethodSignatureUTF: illegal constant");
   }
@@ -2582,7 +2575,7 @@
   JavaValue result(T_VOID);
   JavaCalls::call_virtual(&result,
                           obj,
-                          KlassHandle(THREAD, SystemDictionary::thread_klass()),
+                          KlassHandle(THREAD, SystemDictionary::Thread_klass()),
                           vmSymbolHandles::run_method_name(),
                           vmSymbolHandles::void_method_signature(),
                           THREAD);
@@ -2680,7 +2673,7 @@
       // Fix for 4314342, 4145910, perhaps others: it now doesn't have
       // any effect on the "liveness" of a thread; see
       // JVM_IsThreadAlive, below.
-      if (java_throwable->is_a(SystemDictionary::threaddeath_klass())) {
+      if (java_throwable->is_a(SystemDictionary::ThreadDeath_klass())) {
         java_lang_Thread::set_stillborn(java_thread);
       }
       THROW_OOP(java_throwable);
@@ -3035,7 +3028,7 @@
   }
 
   // Create result array of type [Ljava/lang/Class;
-  objArrayOop result = oopFactory::new_objArray(SystemDictionary::class_klass(), depth, CHECK_NULL);
+  objArrayOop result = oopFactory::new_objArray(SystemDictionary::Class_klass(), depth, CHECK_NULL);
   // Fill in mirrors corresponding to method holders
   int index = 0;
   while (first != NULL) {
@@ -4331,7 +4324,7 @@
   JvmtiVMObjectAllocEventCollector oam;
 
   int num_threads = tle.num_threads();
-  objArrayOop r = oopFactory::new_objArray(SystemDictionary::thread_klass(), num_threads, CHECK_NULL);
+  objArrayOop r = oopFactory::new_objArray(SystemDictionary::Thread_klass(), num_threads, CHECK_NULL);
   objArrayHandle threads_ah(THREAD, r);
 
   for (int i = 0; i < num_threads; i++) {
@@ -4365,7 +4358,7 @@
 
   // check if threads is not an array of objects of Thread class
   klassOop k = objArrayKlass::cast(ah->klass())->element_klass();
-  if (k != SystemDictionary::thread_klass()) {
+  if (k != SystemDictionary::Thread_klass()) {
     THROW_(vmSymbols::java_lang_IllegalArgumentException(), 0);
   }
 
@@ -4425,7 +4418,7 @@
   if (encl_method_class_idx == 0) {
     return NULL;
   }
-  objArrayOop dest_o = oopFactory::new_objArray(SystemDictionary::object_klass(), 3, CHECK_NULL);
+  objArrayOop dest_o = oopFactory::new_objArray(SystemDictionary::Object_klass(), 3, CHECK_NULL);
   objArrayHandle dest(THREAD, dest_o);
   klassOop enc_k = ik_h->constants()->klass_at(encl_method_class_idx, CHECK_NULL);
   dest->obj_at_put(0, Klass::cast(enc_k)->java_mirror());
@@ -4539,7 +4532,7 @@
                values_h->int_at(0) == java_lang_Thread::NEW,
              "Invalid threadStatus value");
 
-      objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+      objArrayOop r = oopFactory::new_objArray(SystemDictionary::String_klass(),
                                                1, /* only 1 substate */
                                                CHECK_NULL);
       names_h = objArrayHandle(THREAD, r);
@@ -4552,7 +4545,7 @@
                values_h->int_at(0) == java_lang_Thread::RUNNABLE,
              "Invalid threadStatus value");
 
-      objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+      objArrayOop r = oopFactory::new_objArray(SystemDictionary::String_klass(),
                                                1, /* only 1 substate */
                                                CHECK_NULL);
       names_h = objArrayHandle(THREAD, r);
@@ -4565,7 +4558,7 @@
                values_h->int_at(0) == java_lang_Thread::BLOCKED_ON_MONITOR_ENTER,
              "Invalid threadStatus value");
 
-      objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+      objArrayOop r = oopFactory::new_objArray(SystemDictionary::String_klass(),
                                                1, /* only 1 substate */
                                                CHECK_NULL);
       names_h = objArrayHandle(THREAD, r);
@@ -4578,7 +4571,7 @@
                values_h->int_at(0) == java_lang_Thread::IN_OBJECT_WAIT &&
                values_h->int_at(1) == java_lang_Thread::PARKED,
              "Invalid threadStatus value");
-      objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+      objArrayOop r = oopFactory::new_objArray(SystemDictionary::String_klass(),
                                                2, /* number of substates */
                                                CHECK_NULL);
       names_h = objArrayHandle(THREAD, r);
@@ -4596,7 +4589,7 @@
                values_h->int_at(1) == java_lang_Thread::IN_OBJECT_WAIT_TIMED &&
                values_h->int_at(2) == java_lang_Thread::PARKED_TIMED,
              "Invalid threadStatus value");
-      objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+      objArrayOop r = oopFactory::new_objArray(SystemDictionary::String_klass(),
                                                3, /* number of substates */
                                                CHECK_NULL);
       names_h = objArrayHandle(THREAD, r);
@@ -4615,7 +4608,7 @@
       assert(values_h->length() == 1 &&
                values_h->int_at(0) == java_lang_Thread::TERMINATED,
              "Invalid threadStatus value");
-      objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+      objArrayOop r = oopFactory::new_objArray(SystemDictionary::String_klass(),
                                                1, /* only 1 substate */
                                                CHECK_NULL);
       names_h = objArrayHandle(THREAD, r);
@@ -4650,4 +4643,3 @@
 #endif // KERNEL
 }
 JVM_END
-
--- a/src/share/vm/prims/jvmtiCodeBlobEvents.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/prims/jvmtiCodeBlobEvents.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -402,7 +402,7 @@
 
     address scopes_data = nm->scopes_data_begin();
     for( pcd = nm->scopes_pcs_begin(); pcd < nm->scopes_pcs_end(); ++pcd ) {
-      ScopeDesc sc0(nm, pcd->scope_decode_offset(), pcd->should_reexecute());
+      ScopeDesc sc0(nm, pcd->scope_decode_offset(), pcd->should_reexecute(), pcd->return_oop());
       ScopeDesc *sd  = &sc0;
       while( !sd->is_top() ) { sd = sd->sender(); }
       int bci = sd->bci();
--- a/src/share/vm/prims/jvmtiEnter.xsl	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/prims/jvmtiEnter.xsl	Wed Mar 24 17:16:33 2010 -0700
@@ -773,7 +773,7 @@
     </xsl:apply-templates>
     <xsl:text>
     }
-    if (!thread_oop-&gt;is_a(SystemDictionary::thread_klass())) {
+    if (!thread_oop-&gt;is_a(SystemDictionary::Thread_klass())) {
 </xsl:text>
     <xsl:apply-templates select=".." mode="traceError">     
       <xsl:with-param name="err">JVMTI_ERROR_INVALID_THREAD</xsl:with-param>
@@ -857,7 +857,7 @@
     </xsl:apply-templates>
     <xsl:text>
   }
-  if (!k_mirror->is_a(SystemDictionary::class_klass())) {
+  if (!k_mirror->is_a(SystemDictionary::Class_klass())) {
 </xsl:text>
     <xsl:apply-templates select=".." mode="traceError">     
       <xsl:with-param name="err">JVMTI_ERROR_INVALID_CLASS</xsl:with-param>
--- a/src/share/vm/prims/jvmtiEnv.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/prims/jvmtiEnv.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,15 +32,15 @@
  // FIXLATER: hook into JvmtiTrace
 #define TraceJVMTICalls false
 
-JvmtiEnv::JvmtiEnv() : JvmtiEnvBase() {
+JvmtiEnv::JvmtiEnv(jint version) : JvmtiEnvBase(version) {
 }
 
 JvmtiEnv::~JvmtiEnv() {
 }
 
 JvmtiEnv*
-JvmtiEnv::create_a_jvmti() {
-  return new JvmtiEnv();
+JvmtiEnv::create_a_jvmti(jint version) {
+  return new JvmtiEnv(version);
 }
 
 // VM operation class to copy jni function table at safepoint.
@@ -133,7 +133,7 @@
     if (thread_oop == NULL) {
       return JVMTI_ERROR_INVALID_THREAD;
     }
-    if (!thread_oop->is_a(SystemDictionary::thread_klass())) {
+    if (!thread_oop->is_a(SystemDictionary::Thread_klass())) {
       return JVMTI_ERROR_INVALID_THREAD;
     }
     JavaThread* java_thread = java_lang_Thread::thread(thread_oop);
@@ -199,7 +199,7 @@
     if (k_mirror == NULL) {
       return JVMTI_ERROR_INVALID_CLASS;
     }
-    if (!k_mirror->is_a(SystemDictionary::class_klass())) {
+    if (!k_mirror->is_a(SystemDictionary::Class_klass())) {
       return JVMTI_ERROR_INVALID_CLASS;
     }
 
@@ -266,7 +266,7 @@
   oop mirror = JNIHandles::resolve_external_guard(object);
   NULL_CHECK(mirror, JVMTI_ERROR_INVALID_OBJECT);
 
-  if (mirror->klass() == SystemDictionary::class_klass()) {
+  if (mirror->klass() == SystemDictionary::Class_klass()) {
     if (!java_lang_Class::is_primitive(mirror)) {
         mirror = java_lang_Class::as_klassOop(mirror);
         assert(mirror != NULL, "class for non-primitive mirror must exist");
@@ -327,7 +327,7 @@
     if (thread_oop == NULL) {
       return JVMTI_ERROR_INVALID_THREAD;
     }
-    if (!thread_oop->is_a(SystemDictionary::thread_klass())) {
+    if (!thread_oop->is_a(SystemDictionary::Thread_klass())) {
       return JVMTI_ERROR_INVALID_THREAD;
     }
     java_thread = java_lang_Thread::thread(thread_oop);
@@ -411,8 +411,15 @@
   if (phase == JVMTI_PHASE_ONLOAD) {
     Arguments::append_sysclasspath(segment);
     return JVMTI_ERROR_NONE;
-  } else {
-    assert(phase == JVMTI_PHASE_LIVE, "sanity check");
+  } else if (use_version_1_0_semantics()) {
+    // This JvmtiEnv requested version 1.0 semantics and this function
+    // is only allowed in the ONLOAD phase in version 1.0, so we need to
+    // return an error here.
+    return JVMTI_ERROR_WRONG_PHASE;
+  } else if (phase == JVMTI_PHASE_LIVE) {
+    // The phase is checked by the wrapper that called this function,
+    // but this thread could be racing with the thread that is
+    // terminating the VM so we check one more time.
 
     // create the zip entry
     ClassPathZipEntry* zip_entry = ClassLoader::create_class_path_zip_entry(segment);
@@ -433,6 +440,8 @@
     }
     ClassLoader::add_to_list(zip_entry);
     return JVMTI_ERROR_NONE;
+  } else {
+    return JVMTI_ERROR_WRONG_PHASE;
   }
 
 } /* end AddToBootstrapClassLoaderSearch */
@@ -451,11 +460,12 @@
       }
     }
     return JVMTI_ERROR_NONE;
-  } else {
+  } else if (phase == JVMTI_PHASE_LIVE) {
+    // The phase is checked by the wrapper that called this function,
+    // but this thread could be racing with the thread that is
+    // terminating the VM so we check one more time.
     HandleMark hm;
 
-    assert(phase == JVMTI_PHASE_LIVE, "sanity check");
-
     // create the zip entry (which will open the zip file and hence
     // check that the segment is indeed a zip file).
     ClassPathZipEntry* zip_entry = ClassLoader::create_class_path_zip_entry(segment);
@@ -504,6 +514,8 @@
     }
 
     return JVMTI_ERROR_NONE;
+  } else {
+    return JVMTI_ERROR_WRONG_PHASE;
   }
 } /* end AddToSystemClassLoaderSearch */
 
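Both AddToBootstrapClassLoaderSearch and AddToSystemClassLoaderSearch get the same treatment: the old assert(phase == JVMTI_PHASE_LIVE, ...) becomes an explicit re-check, since the phase validated by the wrapper can change underneath this thread while another thread terminates the VM. A hedged sketch of the resulting control flow, where do_onload_work and do_live_work are hypothetical stand-ins for the real bodies:

    static jvmtiError add_to_search(const char* segment) {
      jvmtiPhase phase = JvmtiEnv::get_phase();
      if (phase == JVMTI_PHASE_ONLOAD) {
        do_onload_work(segment);        // hypothetical helper
        return JVMTI_ERROR_NONE;
      } else if (phase == JVMTI_PHASE_LIVE) {
        do_live_work(segment);          // hypothetical helper
        return JVMTI_ERROR_NONE;
      } else {
        // Racing with VM termination: fail softly instead of asserting.
        return JVMTI_ERROR_WRONG_PHASE;
      }
    }
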
@@ -580,7 +592,6 @@
     break;
   case JVMTI_VERBOSE_GC:
     PrintGC = value != 0;
-    TraceClassUnloading = value != 0;
     break;
   case JVMTI_VERBOSE_JNI:
     PrintJNIResolving = value != 0;
@@ -620,7 +631,7 @@
     thread_oop = JNIHandles::resolve_external_guard(thread);
   }
 
-  if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::thread_klass())) {
+  if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::Thread_klass())) {
     return JVMTI_ERROR_INVALID_THREAD;
   }
 
@@ -858,7 +869,7 @@
 jvmtiError
 JvmtiEnv::InterruptThread(jthread thread) {
   oop thread_oop = JNIHandles::resolve_external_guard(thread);
-  if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::thread_klass()))
+  if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::Thread_klass()))
     return JVMTI_ERROR_INVALID_THREAD;
 
   JavaThread* current_thread  = JavaThread::current();
@@ -895,7 +906,7 @@
   } else {
     thread_oop = JNIHandles::resolve_external_guard(thread);
   }
-  if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::thread_klass()))
+  if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::Thread_klass()))
     return JVMTI_ERROR_INVALID_THREAD;
 
   Handle thread_obj(current_thread, thread_oop);
@@ -1061,7 +1072,7 @@
 jvmtiError
 JvmtiEnv::RunAgentThread(jthread thread, jvmtiStartFunction proc, const void* arg, jint priority) {
   oop thread_oop = JNIHandles::resolve_external_guard(thread);
-  if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::thread_klass())) {
+  if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::Thread_klass())) {
     return JVMTI_ERROR_INVALID_THREAD;
   }
   if (priority < JVMTI_THREAD_MIN_PRIORITY || priority > JVMTI_THREAD_MAX_PRIORITY) {
@@ -2863,6 +2874,14 @@
 // is_obsolete_ptr - pre-checked for NULL
 jvmtiError
 JvmtiEnv::IsMethodObsolete(methodOop method_oop, jboolean* is_obsolete_ptr) {
+  if (use_version_1_0_semantics() &&
+      get_capabilities()->can_redefine_classes == 0) {
+    // This JvmtiEnv requested version 1.0 semantics and this function
+    // requires the can_redefine_classes capability in version 1.0, so
+    // we need to return an error here.
+    return JVMTI_ERROR_MUST_POSSESS_CAPABILITY;
+  }
+
   if (method_oop == NULL || method_oop->is_obsolete()) {
     *is_obsolete_ptr = true;
   } else {
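
The new check restores the JVMTI 1.0 rule that IsMethodObsolete requires the can_redefine_classes capability; environments that requested 1.1 semantics are exempt. Which semantics an environment gets is fixed at creation time by the constant the agent passes to the standard JNI GetEnv call, for example:

    // Agent-side sketch using the standard JNI invocation API.
    jvmtiEnv* jvmti = NULL;
    jint rc = jvm->GetEnv((void**) &jvmti, JVMTI_VERSION_1_1);
    // rc == JNI_EVERSION if the VM rejects the requested major.minor pair;
    // otherwise this environment keeps 1.1 semantics for its lifetime.
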
--- a/src/share/vm/prims/jvmtiEnvBase.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/prims/jvmtiEnvBase.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -123,7 +123,26 @@
 }
 
 
-JvmtiEnvBase::JvmtiEnvBase() : _env_event_enable() {
+bool
+JvmtiEnvBase::use_version_1_0_semantics() {
+  int major, minor, micro;
+
+  JvmtiExport::decode_version_values(_version, &major, &minor, &micro);
+  return major == 1 && minor == 0;  // micro version doesn't matter here
+}
+
+
+bool
+JvmtiEnvBase::use_version_1_1_semantics() {
+  int major, minor, micro;
+
+  JvmtiExport::decode_version_values(_version, &major, &minor, &micro);
+  return major == 1 && minor == 1;  // micro version doesn't matter here
+}
+
+
+JvmtiEnvBase::JvmtiEnvBase(jint version) : _env_event_enable() {
+  _version = version;
   _env_local_storage = NULL;
   _tag_map = NULL;
   _native_method_prefix_count = 0;
@@ -508,7 +527,7 @@
 JavaThread *
 JvmtiEnvBase::get_JavaThread(jthread jni_thread) {
   oop t = JNIHandles::resolve_external_guard(jni_thread);
-  if (t == NULL || !t->is_a(SystemDictionary::thread_klass())) {
+  if (t == NULL || !t->is_a(SystemDictionary::Thread_klass())) {
     return NULL;
   }
   // The following returns NULL if the thread has not yet run or is in
@@ -1250,7 +1269,7 @@
   for (int i = 0; i < _thread_count; ++i) {
     jthread jt = _thread_list[i];
     oop thread_oop = JNIHandles::resolve_external_guard(jt);
-    if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::thread_klass())) {
+    if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::Thread_klass())) {
       set_result(JVMTI_ERROR_INVALID_THREAD);
       return;
     }
--- a/src/share/vm/prims/jvmtiEnvBase.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/prims/jvmtiEnvBase.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -76,6 +76,7 @@
 
   jvmtiEnv _jvmti_external;
   jint _magic;
+  jint _version;  // version value passed to JNI GetEnv()
   JvmtiEnvBase* _next;
   bool _is_retransformable;
   const void *_env_local_storage;     // per env agent allocated data.
@@ -91,7 +92,7 @@
   int    _native_method_prefix_count;
 
  protected:
-  JvmtiEnvBase();
+  JvmtiEnvBase(jint version);
   ~JvmtiEnvBase();
   void dispose();
   void env_dispose();
@@ -122,6 +123,9 @@
 
   bool is_valid();
 
+  bool use_version_1_0_semantics();  // agent asked for version 1.0
+  bool use_version_1_1_semantics();  // agent asked for version 1.1
+
   bool is_retransformable()                        { return _is_retransformable; }
 
   static ByteSize jvmti_external_offset() {
--- a/src/share/vm/prims/jvmtiEventController.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/prims/jvmtiEventController.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -82,7 +82,7 @@
                                THREAD_START_BIT | THREAD_END_BIT |
                                DYNAMIC_CODE_GENERATED_BIT;
 static const jlong  GLOBAL_EVENT_BITS = ~THREAD_FILTERED_EVENT_BITS;
-
+static const jlong  SHOULD_POST_ON_EXCEPTIONS_BITS = EXCEPTION_BITS | METHOD_EXIT_BIT | FRAME_POP_BIT;
 
 ///////////////////////////////////////////////////////////////
 //
@@ -511,7 +511,12 @@
         leave_interp_only_mode(state);
       }
     }
+
+    // update the JavaThread's cached thread-specific should_post_on_exceptions value
+    bool should_post_on_exceptions = (any_env_enabled & SHOULD_POST_ON_EXCEPTIONS_BITS) != 0;
+    state->set_should_post_on_exceptions(should_post_on_exceptions);
   }
+
   return any_env_enabled;
 }
 
@@ -615,6 +620,10 @@
 
     // set global truly enabled, that is, any thread in any environment
     JvmtiEventController::_universal_global_event_enabled.set_bits(any_env_thread_enabled);
+
+    // set global should_post_on_exceptions
+    JvmtiExport::set_should_post_on_exceptions((any_env_thread_enabled & SHOULD_POST_ON_EXCEPTIONS_BITS) != 0);
+
   }
 
   EC_TRACE(("JVMTI [-] # recompute enabled - after %llx", any_env_thread_enabled));
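
The new SHOULD_POST_ON_EXCEPTIONS_BITS mask collapses the three event kinds that matter on the exception path (exception events, method exit, frame pop) into a single cached boolean, pushed both into JvmtiExport (any thread, any environment) and into each JavaThread via JvmtiThreadState. A hedged sketch of the consumer side; the JavaThread getter is assumed to mirror the setter used above:

    if (JvmtiExport::should_post_on_exceptions() &&   // global cache
        thread->should_post_on_exceptions_flag()) {   // per-thread cache (assumed accessor)
      // Only now pay for the full per-environment event dispatch.
      JvmtiExport::post_exception_throw(thread, method, bcp, exception());
    }
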
--- a/src/share/vm/prims/jvmtiExport.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/prims/jvmtiExport.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -319,7 +319,27 @@
 
 jint
 JvmtiExport::get_jvmti_interface(JavaVM *jvm, void **penv, jint version) {
-  /* To Do: add version checks */
+  // The JVMTI_VERSION_INTERFACE_JVMTI part of the version number
+  // has already been validated in JNI GetEnv().
+  int major, minor, micro;
+
+  // micro version doesn't matter here (yet?)
+  decode_version_values(version, &major, &minor, &micro);
+  switch (major) {
+  case 1:
+      switch (minor) {
+      case 0:  // version 1.0.<micro> is recognized
+      case 1:  // version 1.1.<micro> is recognized
+          break;
+
+      default:
+          return JNI_EVERSION;  // unsupported minor version number
+      }
+      break;
+
+  default:
+      return JNI_EVERSION;  // unsupported major version number
+  }
 
   if (JvmtiEnv::get_phase() == JVMTI_PHASE_LIVE) {
     JavaThread* current_thread = (JavaThread*) ThreadLocalStorage::thread();
@@ -328,13 +348,13 @@
     __ENTRY(jvmtiEnv*, JvmtiExport::get_jvmti_interface, current_thread)
     debug_only(VMNativeEntryWrapper __vew;)
 
-    JvmtiEnv *jvmti_env = JvmtiEnv::create_a_jvmti();
+    JvmtiEnv *jvmti_env = JvmtiEnv::create_a_jvmti(version);
     *penv = jvmti_env->jvmti_external();  // actual type is jvmtiEnv* -- not to be confused with JvmtiEnv*
     return JNI_OK;
 
   } else if (JvmtiEnv::get_phase() == JVMTI_PHASE_ONLOAD) {
     // not live, no thread to transition
-    JvmtiEnv *jvmti_env = JvmtiEnv::create_a_jvmti();
+    JvmtiEnv *jvmti_env = JvmtiEnv::create_a_jvmti(version);
     *penv = jvmti_env->jvmti_external();  // actual type is jvmtiEnv* -- not to be confused with JvmtiEnv*
     return JNI_OK;
 
@@ -345,6 +365,15 @@
   }
 }
 
+
+void
+JvmtiExport::decode_version_values(jint version, int * major, int * minor,
+                                   int * micro) {
+  *major = (version & JVMTI_VERSION_MASK_MAJOR) >> JVMTI_VERSION_SHIFT_MAJOR;
+  *minor = (version & JVMTI_VERSION_MASK_MINOR) >> JVMTI_VERSION_SHIFT_MINOR;
+  *micro = (version & JVMTI_VERSION_MASK_MICRO) >> JVMTI_VERSION_SHIFT_MICRO;
+}
+
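A worked example of the decoding, using the mask and shift constants from jvmti.h (header values as of JVMTI 1.x):

    #include <jvmti.h>
    #include <assert.h>

    int main() {
      jint version = JVMTI_VERSION_1_0;  // 0x30010000
      int major = (version & JVMTI_VERSION_MASK_MAJOR) >> JVMTI_VERSION_SHIFT_MAJOR;
      int minor = (version & JVMTI_VERSION_MASK_MINOR) >> JVMTI_VERSION_SHIFT_MINOR;
      int micro = (version & JVMTI_VERSION_MASK_MICRO) >> JVMTI_VERSION_SHIFT_MICRO;
      assert(major == 1 && minor == 0 && micro == 0);
      return 0;
    }
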
 void JvmtiExport::enter_primordial_phase() {
   JvmtiEnvBase::set_phase(JVMTI_PHASE_PRIMORDIAL);
 }
@@ -627,7 +656,7 @@
   klassOop k = obj->klass();
 
   // if the object is a java.lang.Class then return the java mirror
-  if (k == SystemDictionary::class_klass()) {
+  if (k == SystemDictionary::Class_klass()) {
     if (!java_lang_Class::is_primitive(obj)) {
       k = java_lang_Class::as_klassOop(obj);
       assert(k != NULL, "class for non-primitive mirror must exist");
@@ -657,11 +686,11 @@
   jvmtiAddrLocationMap *_map;
   const void *_compile_info;
  public:
-  JvmtiCompiledMethodLoadEventMark(JavaThread *thread, nmethod *nm)
+  JvmtiCompiledMethodLoadEventMark(JavaThread *thread, nmethod *nm, void* compile_info_ptr = NULL)
           : JvmtiMethodEventMark(thread,methodHandle(thread, nm->method())) {
     _code_data = nm->code_begin();
     _code_size = nm->code_size();
-    _compile_info = NULL; /* no info for our VM. */
+    _compile_info = compile_info_ptr; // void pointer passed through the CompiledMethodLoad event; defaults to NULL
     JvmtiCodeBlobEvents::build_jvmti_addr_location_map(nm, &_map, &_map_length);
   }
   ~JvmtiCompiledMethodLoadEventMark() {
@@ -848,7 +877,7 @@
 bool              JvmtiExport::_can_get_source_debug_extension            = false;
 bool              JvmtiExport::_can_maintain_original_method_order        = false;
 bool              JvmtiExport::_can_post_interpreter_events               = false;
-bool              JvmtiExport::_can_post_exceptions                       = false;
+bool              JvmtiExport::_can_post_on_exceptions                    = false;
 bool              JvmtiExport::_can_post_breakpoint                       = false;
 bool              JvmtiExport::_can_post_field_access                     = false;
 bool              JvmtiExport::_can_post_field_modification               = false;
@@ -879,6 +908,7 @@
 bool              JvmtiExport::_should_post_object_free                   = false;
 bool              JvmtiExport::_should_post_resource_exhausted            = false;
 bool              JvmtiExport::_should_post_vm_object_alloc               = false;
+bool              JvmtiExport::_should_post_on_exceptions                 = false;
 
 ////////////////////////////////////////////////////////////////////////////////////////////////
 
@@ -1723,6 +1753,46 @@
   }
 }
 
+// Returns a record containing inlining information for the given nmethod
+jvmtiCompiledMethodLoadInlineRecord* create_inline_record(nmethod* nm) {
+  jint numstackframes = 0;
+  jvmtiCompiledMethodLoadInlineRecord* record = (jvmtiCompiledMethodLoadInlineRecord*)NEW_RESOURCE_OBJ(jvmtiCompiledMethodLoadInlineRecord);
+  record->header.kind = JVMTI_CMLR_INLINE_INFO;
+  record->header.next = NULL;
+  record->header.majorinfoversion = JVMTI_CMLR_MAJOR_VERSION_1;
+  record->header.minorinfoversion = JVMTI_CMLR_MINOR_VERSION_0;
+  record->numpcs = 0;
+  for (PcDesc* p = nm->scopes_pcs_begin(); p < nm->scopes_pcs_end(); p++) {
+    if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null) continue;
+    record->numpcs++;
+  }
+  record->pcinfo = (PCStackInfo*)(NEW_RESOURCE_ARRAY(PCStackInfo, record->numpcs));
+  int scope = 0;
+  for (PcDesc* p = nm->scopes_pcs_begin(); p < nm->scopes_pcs_end(); p++) {
+    if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null) continue;
+    void* pc_address = (void*)p->real_pc(nm);
+    assert(pc_address != NULL, "pc_address must be non-null");
+    record->pcinfo[scope].pc = pc_address;
+    numstackframes = 0;
+    for (ScopeDesc* sd = nm->scope_desc_at(p->real_pc(nm)); sd != NULL; sd = sd->sender()) {
+      numstackframes++;
+    }
+    assert(numstackframes != 0, "numstackframes must be nonzero.");
+    record->pcinfo[scope].methods = (jmethodID *)NEW_RESOURCE_ARRAY(jmethodID, numstackframes);
+    record->pcinfo[scope].bcis = (jint *)NEW_RESOURCE_ARRAY(jint, numstackframes);
+    record->pcinfo[scope].numstackframes = numstackframes;
+    int stackframe = 0;
+    for (ScopeDesc* sd = nm->scope_desc_at(p->real_pc(nm)); sd != NULL; sd = sd->sender()) {
+      // sd->method() can be NULL for stubs but never for nmethods, so we
+      // should never see a NULL method here.
+      assert(!sd->method().is_null(), "sd->method() cannot be null.");
+      record->pcinfo[scope].methods[stackframe] = sd->method()->jmethod_id();
+      record->pcinfo[scope].bcis[stackframe] = sd->bci();
+      stackframe++;
+    }
+    scope++;
+  }
+  return record;
+}
 
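On the agent side, this record arrives through the compile_info argument of the CompiledMethodLoad callback and is decoded with the jvmticmlr.h types; within each PCStackInfo, index 0 describes the innermost (most deeply inlined) frame, because the loop above walks from the leaf ScopeDesc outward through its senders. A hedged agent-side sketch:

    #include <jvmticmlr.h>

    static void JNICALL cb_compiled_method_load(
        jvmtiEnv* env, jmethodID method, jint code_size, const void* code_addr,
        jint map_length, const jvmtiAddrLocationMap* map, const void* compile_info) {
      const jvmtiCompiledMethodLoadRecordHeader* h =
          (const jvmtiCompiledMethodLoadRecordHeader*) compile_info;
      for (; h != NULL; h = h->next) {
        if (h->kind != JVMTI_CMLR_INLINE_INFO) continue;
        const jvmtiCompiledMethodLoadInlineRecord* rec =
            (const jvmtiCompiledMethodLoadInlineRecord*) h;
        for (jint i = 0; i < rec->numpcs; i++) {
          PCStackInfo* pcs = &rec->pcinfo[i];
          // pcs->methods[0] / pcs->bcis[0]: the innermost frame at pcs->pc.
        }
      }
    }
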
 void JvmtiExport::post_compiled_method_load(nmethod *nm) {
   // If there are pending CompiledMethodUnload events then these are
@@ -1751,7 +1821,11 @@
                 (nm->method() == NULL) ? "NULL" : nm->method()->name()->as_C_string()));
 
       ResourceMark rm(thread);
-      JvmtiCompiledMethodLoadEventMark jem(thread, nm);
+
+      // Add inlining information
+      jvmtiCompiledMethodLoadInlineRecord* inlinerecord = create_inline_record(nm);
+      // Pass inlining information through the void pointer
+      JvmtiCompiledMethodLoadEventMark jem(thread, nm, inlinerecord);
       JvmtiJavaThreadEventTransition jet(thread);
       jvmtiEventCompiledMethodLoad callback = env->callbacks()->CompiledMethodLoad;
       if (callback != NULL) {
@@ -1896,7 +1970,7 @@
       if (collector != NULL && collector->is_enabled()) {
         // Don't record classes as these will be notified via the ClassLoad
         // event.
-        if (obj->klass() != SystemDictionary::class_klass()) {
+        if (obj->klass() != SystemDictionary::Class_klass()) {
           collector->record_allocation(obj);
         }
       }
--- a/src/share/vm/prims/jvmtiExport.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/prims/jvmtiExport.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -66,7 +66,7 @@
   JVMTI_SUPPORT_FLAG(can_get_source_debug_extension)
   JVMTI_SUPPORT_FLAG(can_maintain_original_method_order)
   JVMTI_SUPPORT_FLAG(can_post_interpreter_events)
-  JVMTI_SUPPORT_FLAG(can_post_exceptions)
+  JVMTI_SUPPORT_FLAG(can_post_on_exceptions)
   JVMTI_SUPPORT_FLAG(can_post_breakpoint)
   JVMTI_SUPPORT_FLAG(can_post_field_access)
   JVMTI_SUPPORT_FLAG(can_post_field_modification)
@@ -93,6 +93,7 @@
   JVMTI_SUPPORT_FLAG(should_post_data_dump)
   JVMTI_SUPPORT_FLAG(should_post_garbage_collection_start)
   JVMTI_SUPPORT_FLAG(should_post_garbage_collection_finish)
+  JVMTI_SUPPORT_FLAG(should_post_on_exceptions)
 
   // ------ the below maybe don't have to be (but are for now)
   // fixed conditions here ------------
@@ -236,6 +237,8 @@
   static bool is_jvmti_version(jint version)                      { return (version & JVMTI_VERSION_MASK) == JVMTI_VERSION_VALUE; }
   static bool is_jvmdi_version(jint version)                      { return (version & JVMTI_VERSION_MASK) == JVMDI_VERSION_VALUE; }
   static jint get_jvmti_interface(JavaVM *jvm, void **penv, jint version);
+  static void decode_version_values(jint version, int * major, int * minor,
+                                    int * micro);
 
   // single stepping management methods
   static void at_single_stepping_point(JavaThread *thread, methodOop method, address location) KERNEL_RETURN;
--- a/src/share/vm/prims/jvmtiHpp.xsl	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/prims/jvmtiHpp.xsl	Wed Mar 24 17:16:33 2010 -0700
@@ -1,6 +1,6 @@
 <?xml version="1.0"?> 
 <!--
- Copyright 2002-2005 Sun Microsystems, Inc.  All Rights Reserved.
+ Copyright 2002-2009 Sun Microsystems, Inc.  All Rights Reserved.
  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 
  This code is free software; you can redistribute it and/or modify it
@@ -48,12 +48,12 @@
 
 private:
     
-    JvmtiEnv();
+    JvmtiEnv(jint version);
     ~JvmtiEnv();
 
 public:
 
-    static JvmtiEnv* create_a_jvmti();
+    static JvmtiEnv* create_a_jvmti(jint version);
 
 </xsl:text>
   <xsl:apply-templates select="functionsection"/>
--- a/src/share/vm/prims/jvmtiManageCapabilities.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/prims/jvmtiManageCapabilities.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -115,8 +115,10 @@
   jvmtiCapabilities jc;
 
   memset(&jc, 0, sizeof(jc));
+#ifndef CC_INTERP
   jc.can_pop_frame = 1;
   jc.can_force_early_return = 1;
+#endif // !CC_INTERP
   jc.can_get_source_debug_extension = 1;
   jc.can_access_local_variables = 1;
   jc.can_maintain_original_method_order = 1;
@@ -355,7 +357,7 @@
     avail.can_access_local_variables  ||
     avail.can_redefine_classes ||
     avail.can_retransform_classes);
-  JvmtiExport::set_can_post_exceptions(
+  JvmtiExport::set_can_post_on_exceptions(
     avail.can_generate_exception_events ||
     avail.can_generate_frame_pop_events ||
     avail.can_generate_method_exit_events);
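
The CC_INTERP guard above stops the VM from advertising can_pop_frame and can_force_early_return when built with the C++ interpreter (used by the Zero port), which lacks the frame-manipulation support those functions need. Agents should therefore probe for these capabilities rather than assume them, for example:

    // Agent-side sketch using standard JVMTI capability calls.
    jvmtiCapabilities potential;
    memset(&potential, 0, sizeof(potential));
    jvmti->GetPotentialCapabilities(&potential);
    if (potential.can_pop_frame) {
      jvmtiCapabilities want;
      memset(&want, 0, sizeof(want));
      want.can_pop_frame = 1;
      jvmti->AddCapabilities(&want);  // request only what is available
    }
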
--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -3214,7 +3214,7 @@
     //  - all instanceKlasses for redefined classes reused & contents updated
     the_class->vtable()->initialize_vtable(false, THREAD);
     the_class->itable()->initialize_itable(false, THREAD);
-    assert(!HAS_PENDING_EXCEPTION || (THREAD->pending_exception()->is_a(SystemDictionary::threaddeath_klass())), "redefine exception");
+    assert(!HAS_PENDING_EXCEPTION || (THREAD->pending_exception()->is_a(SystemDictionary::ThreadDeath_klass())), "redefine exception");
   }
 
   // Leave arrays of jmethodIDs and itable index cache unchanged
--- a/src/share/vm/prims/jvmtiTagMap.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/prims/jvmtiTagMap.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -579,7 +579,7 @@
 // If the object is a java.lang.Class then return the klassOop,
 // otherwise return the original object
 static inline oop klassOop_if_java_lang_Class(oop o) {
-  if (o->klass() == SystemDictionary::class_klass()) {
+  if (o->klass() == SystemDictionary::Class_klass()) {
     if (!java_lang_Class::is_primitive(o)) {
       o = (oop)java_lang_Class::as_klassOop(o);
       assert(o != NULL, "class for non-primitive mirror must exist");
@@ -644,7 +644,7 @@
     } else {
       // if the object represents a runtime class then use the
       // tag for java.lang.Class
-      _klass = SystemDictionary::class_klass();
+      _klass = SystemDictionary::Class_klass();
     }
     _klass_tag = tag_for(tag_map, _klass);
   }
@@ -747,7 +747,7 @@
       // get referrer class tag.
       klassOop k = (_referrer == referrer) ?  // Check if referrer is a class...
           _referrer->klass()                  // No, just get its class
-         : SystemDictionary::class_klass();   // Yes, its class is Class
+         : SystemDictionary::Class_klass();   // Yes, its class is Class
       _referrer_klass_tag = tag_for(tag_map, k);
     }
   }
@@ -1126,7 +1126,7 @@
                                          oop str,
                                          void* user_data)
 {
-  assert(str->klass() == SystemDictionary::string_klass(), "not a string");
+  assert(str->klass() == SystemDictionary::String_klass(), "not a string");
 
   // get the string value and length
   // (string value may be offset from the base)
@@ -1186,7 +1186,7 @@
   // for static fields only the index will be set
   static jvmtiHeapReferenceInfo reference_info = { 0 };
 
-  assert(obj->klass() == SystemDictionary::class_klass(), "not a class");
+  assert(obj->klass() == SystemDictionary::Class_klass(), "not a class");
   if (java_lang_Class::is_primitive(obj)) {
     return 0;
   }
@@ -1498,7 +1498,7 @@
   if (callbacks()->primitive_field_callback != NULL && obj->is_instance()) {
     jint res;
     jvmtiPrimitiveFieldCallback cb = callbacks()->primitive_field_callback;
-    if (obj->klass() == SystemDictionary::class_klass()) {
+    if (obj->klass() == SystemDictionary::Class_klass()) {
       res = invoke_primitive_field_callback_for_static_fields(&wrapper,
                                                                     obj,
                                                                     cb,
@@ -1515,7 +1515,7 @@
   // string callback
   if (!is_array &&
       callbacks()->string_primitive_value_callback != NULL &&
-      obj->klass() == SystemDictionary::string_klass()) {
+      obj->klass() == SystemDictionary::String_klass()) {
     jint res = invoke_string_value_callback(
                 callbacks()->string_primitive_value_callback,
                 &wrapper,
@@ -2381,7 +2381,7 @@
 
 // invoke the string value callback
 inline bool CallbackInvoker::report_string_value(oop str) {
-  assert(str->klass() == SystemDictionary::string_klass(), "not a string");
+  assert(str->klass() == SystemDictionary::String_klass(), "not a string");
 
   AdvancedHeapWalkContext* context = advanced_context();
   assert(context->string_primitive_value_callback() != NULL, "no callback");
@@ -2928,7 +2928,7 @@
 
     // super (only if something more interesting than java.lang.Object)
     klassOop java_super = ik->java_super();
-    if (java_super != NULL && java_super != SystemDictionary::object_klass()) {
+    if (java_super != NULL && java_super != SystemDictionary::Object_klass()) {
       oop super = Klass::cast(java_super)->java_mirror();
       if (!CallbackInvoker::report_superclass_reference(mirror, super)) {
         return false;
@@ -3070,7 +3070,7 @@
 
   // if the object is a java.lang.String
   if (is_reporting_string_values() &&
-      o->klass() == SystemDictionary::string_klass()) {
+      o->klass() == SystemDictionary::String_klass()) {
     if (!CallbackInvoker::report_string_value(o)) {
       return false;
     }
@@ -3255,7 +3255,7 @@
 
   // instance
   if (o->is_instance()) {
-    if (o->klass() == SystemDictionary::class_klass()) {
+    if (o->klass() == SystemDictionary::Class_klass()) {
       o = klassOop_if_java_lang_Class(o);
       if (o->is_klass()) {
         // a java.lang.Class
--- a/src/share/vm/prims/jvmtiThreadState.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/prims/jvmtiThreadState.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -381,6 +381,9 @@
   static ByteSize earlyret_value_offset() { return byte_offset_of(JvmtiThreadState, _earlyret_value); }
 
   void oops_do(OopClosure* f); // GC support
+
+public:
+  void set_should_post_on_exceptions(bool val) { _thread->set_should_post_on_exceptions_flag(val ? JNI_TRUE : JNI_FALSE); }
 };
 
 class RedefineVerifyMark : public StackObj {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/prims/methodHandleWalk.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -0,0 +1,1401 @@
+/*
+ * Copyright 2008-2010 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/*
+ * JSR 292 reference implementation: method handle structure analysis
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_methodHandleWalk.cpp.incl"
+
+
+// -----------------------------------------------------------------------------
+// MethodHandleChain
+
+void MethodHandleChain::set_method_handle(Handle mh, TRAPS) {
+  if (!java_dyn_MethodHandle::is_instance(mh()))  lose("bad method handle", CHECK);
+
+  // set current method handle and unpack partially
+  _method_handle = mh;
+  _is_last       = false;
+  _is_bound      = false;
+  _arg_slot      = -1;
+  _arg_type      = T_VOID;
+  _conversion    = -1;
+  _last_invoke   = Bytecodes::_nop;  // arbitrary non-garbage
+
+  if (sun_dyn_DirectMethodHandle::is_instance(mh())) {
+    set_last_method(mh(), THREAD);
+    return;
+  }
+  if (sun_dyn_AdapterMethodHandle::is_instance(mh())) {
+    _conversion = AdapterMethodHandle_conversion();
+    assert(_conversion != -1, "bad conv value");
+    assert(sun_dyn_BoundMethodHandle::is_instance(mh()), "also BMH");
+  }
+  if (sun_dyn_BoundMethodHandle::is_instance(mh())) {
+    if (!is_adapter())          // keep AMH and BMH separate in this model
+      _is_bound = true;
+    _arg_slot = BoundMethodHandle_vmargslot();
+    oop target = MethodHandle_vmtarget_oop();
+    if (!is_bound() || java_dyn_MethodHandle::is_instance(target)) {
+      _arg_type = compute_bound_arg_type(target, NULL, _arg_slot, CHECK);
+    } else if (target != NULL && target->is_method()) {
+      methodOop m = (methodOop) target;
+      _arg_type = compute_bound_arg_type(NULL, m, _arg_slot, CHECK);
+      set_last_method(mh(), CHECK);
+    } else {
+      _is_bound = false;  // lose!
+    }
+  }
+  if (is_bound() && _arg_type == T_VOID) {
+    lose("bad vmargslot", CHECK);
+  }
+  if (!is_bound() && !is_adapter()) {
+    lose("unrecognized MH type", CHECK);
+  }
+}
+
+
+void MethodHandleChain::set_last_method(oop target, TRAPS) {
+  _is_last = true;
+  klassOop receiver_limit_oop = NULL;
+  int flags = 0;
+  methodOop m = MethodHandles::decode_method(target, receiver_limit_oop, flags);
+  _last_method = methodHandle(THREAD, m);
+  if ((flags & MethodHandles::_dmf_has_receiver) == 0)
+    _last_invoke = Bytecodes::_invokestatic;
+  else if ((flags & MethodHandles::_dmf_does_dispatch) == 0)
+    _last_invoke = Bytecodes::_invokespecial;
+  else if ((flags & MethodHandles::_dmf_from_interface) != 0)
+    _last_invoke = Bytecodes::_invokeinterface;
+  else
+    _last_invoke = Bytecodes::_invokevirtual;
+}
+
+
+BasicType MethodHandleChain::compute_bound_arg_type(oop target, methodOop m, int arg_slot, TRAPS) {
+  // There is no direct indication of whether the argument is primitive or not.
+  // It is implied by the _vmentry code, and by the MethodType of the target.
+  // FIXME: Make this explicit once MethodHandleImpl is refactored out from MethodHandle.
+  BasicType arg_type = T_VOID;
+  if (target != NULL) {
+    oop mtype = java_dyn_MethodHandle::type(target);
+    int arg_num = MethodHandles::argument_slot_to_argnum(mtype, arg_slot);
+    if (arg_num >= 0) {
+      oop ptype = java_dyn_MethodType::ptype(mtype, arg_num);
+      arg_type = java_lang_Class::as_BasicType(ptype);
+    }
+  } else if (m != NULL) {
+    // figure out the argument type from the slot
+    // FIXME: make this explicit in the MH
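+    // Argument slots are numbered from the stack top: cur_slot starts at the
+    // full parameter size and shrinks as we scan, so slot 0 is the trailing
+    // argument word and the receiver (if any) owns the highest slot.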
+    int cur_slot = m->size_of_parameters();
+    if (arg_slot >= cur_slot)
+      return T_VOID;
+    if (!m->is_static()) {
+      cur_slot -= type2size[T_OBJECT];
+      if (cur_slot == arg_slot)
+        return T_OBJECT;
+    }
+    for (SignatureStream ss(m->signature()); !ss.is_done(); ss.next()) {
+      BasicType bt = ss.type();
+      cur_slot -= type2size[bt];
+      if (cur_slot <= arg_slot) {
+        if (cur_slot == arg_slot)
+          arg_type = bt;
+        break;
+      }
+    }
+  }
+  if (arg_type == T_ARRAY)
+    arg_type = T_OBJECT;
+  return arg_type;
+}
+
+
+void MethodHandleChain::lose(const char* msg, TRAPS) {
+  assert(false, "lose");
+  _lose_message = msg;
+  if (!THREAD->is_Java_thread() || ((JavaThread*)THREAD)->thread_state() != _thread_in_vm) {
+    // throw a preallocated exception
+    THROW_OOP(Universe::virtual_machine_error_instance());
+  }
+  THROW_MSG(vmSymbols::java_lang_InternalError(), msg);
+}
+
+
+// -----------------------------------------------------------------------------
+// MethodHandleWalker
+
+Bytecodes::Code MethodHandleWalker::conversion_code(BasicType src, BasicType dest) {
+  if (is_subword_type(src)) {
+    src = T_INT;          // all subword src types act like int
+  }
+  if (src == dest) {
+    return Bytecodes::_nop;
+  }
+
+#define SRC_DEST(s,d) (((int)(s) << 4) + (int)(d))
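+  // SRC_DEST packs a (src, dest) pair into one small integer so a single
+  // switch can dispatch on both basic types at once; e.g. the
+  // (T_INT, T_LONG) case below selects _i2l.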
+  switch (SRC_DEST(src, dest)) {
+  case SRC_DEST(T_INT, T_LONG):           return Bytecodes::_i2l;
+  case SRC_DEST(T_INT, T_FLOAT):          return Bytecodes::_i2f;
+  case SRC_DEST(T_INT, T_DOUBLE):         return Bytecodes::_i2d;
+  case SRC_DEST(T_INT, T_BYTE):           return Bytecodes::_i2b;
+  case SRC_DEST(T_INT, T_CHAR):           return Bytecodes::_i2c;
+  case SRC_DEST(T_INT, T_SHORT):          return Bytecodes::_i2s;
+
+  case SRC_DEST(T_LONG, T_INT):           return Bytecodes::_l2i;
+  case SRC_DEST(T_LONG, T_FLOAT):         return Bytecodes::_l2f;
+  case SRC_DEST(T_LONG, T_DOUBLE):        return Bytecodes::_l2d;
+
+  case SRC_DEST(T_FLOAT, T_INT):          return Bytecodes::_f2i;
+  case SRC_DEST(T_FLOAT, T_LONG):         return Bytecodes::_f2l;
+  case SRC_DEST(T_FLOAT, T_DOUBLE):       return Bytecodes::_f2d;
+
+  case SRC_DEST(T_DOUBLE, T_INT):         return Bytecodes::_d2i;
+  case SRC_DEST(T_DOUBLE, T_LONG):        return Bytecodes::_d2l;
+  case SRC_DEST(T_DOUBLE, T_FLOAT):       return Bytecodes::_d2f;
+  }
+#undef SRC_DEST
+
+  // cannot do it in one step, or at all
+  return Bytecodes::_illegal;
+}
+
+
+// -----------------------------------------------------------------------------
+// MethodHandleWalker::walk
+//
+MethodHandleWalker::ArgToken
+MethodHandleWalker::walk(TRAPS) {
+  ArgToken empty = ArgToken();  // Empty return value.
+
+  walk_incoming_state(CHECK_(empty));
+
+  for (;;) {
+    set_method_handle(chain().method_handle_oop());
+
+    assert(_outgoing_argc == argument_count_slow(), "empty slots under control");
+
+    if (chain().is_adapter()) {
+      int conv_op = chain().adapter_conversion_op();
+      int arg_slot = chain().adapter_arg_slot();
+      SlotState* arg_state = slot_state(arg_slot);
+      if (arg_state == NULL
+          && conv_op > sun_dyn_AdapterMethodHandle::OP_RETYPE_RAW) {
+        lose("bad argument index", CHECK_(empty));
+      }
+
+      // perform the adapter action
+      switch (chain().adapter_conversion_op()) {
+      case sun_dyn_AdapterMethodHandle::OP_RETYPE_ONLY:
+        // No changes to arguments; pass the bits through.
+        break;
+
+      case sun_dyn_AdapterMethodHandle::OP_RETYPE_RAW: {
+        // To keep the verifier happy, emit bitwise ("raw") conversions as needed.
+        // See MethodHandles::same_basic_type_for_arguments for allowed conversions.
+        Handle incoming_mtype(THREAD, chain().method_type_oop());
+        oop outgoing_mh_oop = chain().vmtarget_oop();
+        if (!java_dyn_MethodHandle::is_instance(outgoing_mh_oop))
+          lose("outgoing target not a MethodHandle", CHECK_(empty));
+        Handle outgoing_mtype(THREAD, java_dyn_MethodHandle::type(outgoing_mh_oop));
+        outgoing_mh_oop = NULL;  // GC safety
+
+        int nptypes = java_dyn_MethodType::ptype_count(outgoing_mtype());
+        if (nptypes != java_dyn_MethodType::ptype_count(incoming_mtype()))
+          lose("incoming and outgoing parameter count do not agree", CHECK_(empty));
+
+        for (int i = 0, slot = _outgoing.length() - 1; slot >= 0; slot--) {
+          SlotState* arg_state = slot_state(slot);
+          if (arg_state->_type == T_VOID)  continue;
+          ArgToken arg = _outgoing.at(slot)._arg;
+
+          klassOop  in_klass  = NULL;
+          klassOop  out_klass = NULL;
+          BasicType inpbt  = java_lang_Class::as_BasicType(java_dyn_MethodType::ptype(incoming_mtype(), i), &in_klass);
+          BasicType outpbt = java_lang_Class::as_BasicType(java_dyn_MethodType::ptype(outgoing_mtype(), i), &out_klass);
+          assert(inpbt == arg.basic_type(), "sanity");
+
+          if (inpbt != outpbt) {
+            vmIntrinsics::ID iid = vmIntrinsics::for_raw_conversion(inpbt, outpbt);
+            if (iid == vmIntrinsics::_none) {
+              lose("no raw conversion method", CHECK_(empty));
+            }
+            ArgToken arglist[2];
+            arglist[0] = arg;         // outgoing 'this'
+            arglist[1] = ArgToken();  // sentinel
+            arg = make_invoke(NULL, iid, Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK_(empty));
+            change_argument(inpbt, slot, outpbt, arg);
+          }
+
+          i++;  // (Void slots were skipped by the continue above, so they do not advance i.)
+        }
+
+        BasicType inrbt  = java_lang_Class::as_BasicType(java_dyn_MethodType::rtype(incoming_mtype()));
+        BasicType outrbt = java_lang_Class::as_BasicType(java_dyn_MethodType::rtype(outgoing_mtype()));
+        if (inrbt != outrbt) {
+          if (inrbt == T_INT && outrbt == T_VOID) {
+            // See comments in MethodHandles::same_basic_type_for_arguments.
+          } else {
+            assert(false, "IMPLEMENT ME");
+            lose("no raw conversion method", CHECK_(empty));
+          }
+        }
+        break;
+      }
+
+      case sun_dyn_AdapterMethodHandle::OP_CHECK_CAST: {
+        // checkcast the Nth outgoing argument in place
+        klassOop dest_klass = NULL;
+        BasicType dest = java_lang_Class::as_BasicType(chain().adapter_arg_oop(), &dest_klass);
+        assert(dest == T_OBJECT, "");
+        assert(dest == arg_state->_type, "");
+        ArgToken arg = arg_state->_arg;
+        ArgToken new_arg = make_conversion(T_OBJECT, dest_klass, Bytecodes::_checkcast, arg, CHECK_(empty));
+        assert(arg.index() == new_arg.index(), "should be the same index");
+        debug_only(dest_klass = (klassOop)badOop);
+        break;
+      }
+
+      case sun_dyn_AdapterMethodHandle::OP_PRIM_TO_PRIM: {
+        // i2l, etc., on the Nth outgoing argument in place
+        BasicType src = chain().adapter_conversion_src_type(),
+                  dest = chain().adapter_conversion_dest_type();
+        Bytecodes::Code bc = conversion_code(src, dest);
+        ArgToken arg = arg_state->_arg;
+        if (bc == Bytecodes::_nop) {
+          break;
+        } else if (bc != Bytecodes::_illegal) {
+          arg = make_conversion(dest, NULL, bc, arg, CHECK_(empty));
+        } else if (is_subword_type(dest)) {
+          // No single bytecode exists; convert in two steps, src -> int -> dest.
+          bc = conversion_code(src, T_INT);
+          if (bc != Bytecodes::_illegal) {
+            arg = make_conversion(T_INT, NULL, bc, arg, CHECK_(empty));
+            bc = conversion_code(T_INT, dest);
+            arg = make_conversion(dest, NULL, bc, arg, CHECK_(empty));
+          }
+        }
+        if (bc == Bytecodes::_illegal) {
+          lose("bad primitive conversion", CHECK_(empty));
+        }
+        change_argument(src, arg_slot, dest, arg);
+        break;
+      }
+
+      case sun_dyn_AdapterMethodHandle::OP_REF_TO_PRIM: {
+        // checkcast to wrapper type & call intValue, etc.
+        BasicType dest = chain().adapter_conversion_dest_type();
+        ArgToken arg = arg_state->_arg;
+        arg = make_conversion(T_OBJECT, SystemDictionary::box_klass(dest),
+                              Bytecodes::_checkcast, arg, CHECK_(empty));
+        vmIntrinsics::ID unboxer = vmIntrinsics::for_unboxing(dest);
+        if (unboxer == vmIntrinsics::_none) {
+          lose("no unboxing method", CHECK_(empty));
+        }
+        ArgToken arglist[2];
+        arglist[0] = arg;         // outgoing 'this'
+        arglist[1] = ArgToken();  // sentinel
+        arg = make_invoke(NULL, unboxer, Bytecodes::_invokevirtual, false, 1, &arglist[0], CHECK_(empty));
+        change_argument(T_OBJECT, arg_slot, dest, arg);
+        break;
+      }
+
+      case sun_dyn_AdapterMethodHandle::OP_PRIM_TO_REF: {
+        // call wrapper type.valueOf
+        BasicType src = chain().adapter_conversion_src_type();
+        ArgToken arg = arg_state->_arg;
+        vmIntrinsics::ID boxer = vmIntrinsics::for_boxing(src);
+        if (boxer == vmIntrinsics::_none) {
+          lose("no boxing method", CHECK_(empty));
+        }
+        ArgToken arglist[2];
+        arglist[0] = arg;         // outgoing value
+        arglist[1] = ArgToken();  // sentinel
+        assert(false, "I think the argument count must be 1 instead of 0");
+        arg = make_invoke(NULL, boxer, Bytecodes::_invokevirtual, false, 0, &arglist[0], CHECK_(empty));
+        change_argument(src, arg_slot, T_OBJECT, arg);
+        break;
+      }
+
+      case sun_dyn_AdapterMethodHandle::OP_SWAP_ARGS: {
+        int dest_arg_slot = chain().adapter_conversion_vminfo();
+        if (!slot_has_argument(dest_arg_slot)) {
+          lose("bad swap index", CHECK_(empty));
+        }
+        // a simple swap between two arguments
+        SlotState* dest_arg_state = slot_state(dest_arg_slot);
+        SlotState temp = (*dest_arg_state);
+        (*dest_arg_state) = (*arg_state);
+        (*arg_state) = temp;
+        break;
+      }
+
+      case sun_dyn_AdapterMethodHandle::OP_ROT_ARGS: {
+        int dest_arg_slot = chain().adapter_conversion_vminfo();
+        if (!slot_has_argument(dest_arg_slot) || arg_slot == dest_arg_slot) {
+          lose("bad rotate index", CHECK_(empty));
+        }
+        SlotState* dest_arg_state = slot_state(dest_arg_slot);
+        // Rotate the source argument (plus following N slots) into the
+        // position occupied by the dest argument (plus following N slots).
+        int rotate_count = type2size[dest_arg_state->_type];
+        // (no other rotate counts are currently supported)
+        if (arg_slot < dest_arg_slot) {
+          for (int i = 0; i < rotate_count; i++) {
+            SlotState temp = _outgoing.at(arg_slot);
+            _outgoing.remove_at(arg_slot);
+            _outgoing.insert_before(dest_arg_slot + rotate_count - 1, temp);
+          }
+        } else { // arg_slot > dest_arg_slot
+          for (int i = 0; i < rotate_count; i++) {
+            SlotState temp = _outgoing.at(arg_slot + rotate_count - 1);
+            _outgoing.remove_at(arg_slot + rotate_count - 1);
+            _outgoing.insert_before(dest_arg_slot, temp);
+          }
+        }
+        break;
+      }
+
+      case sun_dyn_AdapterMethodHandle::OP_DUP_ARGS: {
+        int dup_slots = chain().adapter_conversion_stack_pushes();
+        if (dup_slots <= 0) {
+          lose("bad dup count", CHECK_(empty));
+        }
+        for (int i = 0; i < dup_slots; i++) {
+          SlotState* dup = slot_state(arg_slot + 2*i);
+          if (dup == NULL)              break;  // safety net
+          if (dup->_type != T_VOID)     _outgoing_argc += 1;
+          _outgoing.insert_before(i, (*dup));
+        }
+        break;
+      }
+
+      case sun_dyn_AdapterMethodHandle::OP_DROP_ARGS: {
+        int drop_slots = -chain().adapter_conversion_stack_pushes();
+        if (drop_slots <= 0) {
+          lose("bad drop count", CHECK_(empty));
+        }
+        for (int i = 0; i < drop_slots; i++) {
+          SlotState* drop = slot_state(arg_slot);
+          if (drop == NULL)             break;  // safety net
+          if (drop->_type != T_VOID)    _outgoing_argc -= 1;
+          _outgoing.remove_at(arg_slot);
+        }
+        break;
+      }
+
+      case sun_dyn_AdapterMethodHandle::OP_COLLECT_ARGS: { //NYI, may GC
+        lose("unimplemented", CHECK_(empty));
+        break;
+      }
+
+      case sun_dyn_AdapterMethodHandle::OP_SPREAD_ARGS: {
+        klassOop array_klass_oop = NULL;
+        BasicType array_type = java_lang_Class::as_BasicType(chain().adapter_arg_oop(),
+                                                             &array_klass_oop);
+        assert(array_type == T_OBJECT, "");
+        assert(Klass::cast(array_klass_oop)->oop_is_array(), "");
+        arrayKlassHandle array_klass(THREAD, array_klass_oop);
+        debug_only(array_klass_oop = (klassOop)badOop);
+
+        klassOop element_klass_oop = NULL;
+        BasicType element_type = java_lang_Class::as_BasicType(array_klass->component_mirror(),
+                                                               &element_klass_oop);
+        KlassHandle element_klass(THREAD, element_klass_oop);
+        debug_only(element_klass_oop = (klassOop)badOop);
+
+        // Fetch the argument, which we will cast to the required array type.
+        assert(arg_state->_type == T_OBJECT, "");
+        ArgToken array_arg = arg_state->_arg;
+        array_arg = make_conversion(T_OBJECT, array_klass(), Bytecodes::_checkcast, array_arg, CHECK_(empty));
+        change_argument(T_OBJECT, arg_slot, T_VOID, ArgToken(tt_void));
+
+        // Check the required length.
+        int spread_slots = 1 + chain().adapter_conversion_stack_pushes();
+        int spread_length = spread_slots;
+        if (type2size[element_type] == 2) {
+          if (spread_slots % 2 != 0)  spread_slots = -1;  // force error
+          spread_length = spread_slots / 2;
+        }
+        if (spread_slots < 0) {
+          lose("bad spread length", CHECK_(empty));
+        }
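+        // E.g. spreading a long[] supplies two stack slots per element, so an
+        // odd spread_slots count is impossible and was rejected above.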
+
+        jvalue   length_jvalue;  length_jvalue.i = spread_length;
+        ArgToken length_arg = make_prim_constant(T_INT, &length_jvalue, CHECK_(empty));
+        // Call a built-in method known to the JVM to validate the length.
+        ArgToken arglist[3];
+        arglist[0] = array_arg;   // value to check
+        arglist[1] = length_arg;  // length to check
+        arglist[2] = ArgToken();  // sentinel
+        make_invoke(NULL, vmIntrinsics::_checkSpreadArgument,
+                    Bytecodes::_invokestatic, false, 3, &arglist[0], CHECK_(empty));
+
+        // Spread out the array elements.
+        Bytecodes::Code aload_op = Bytecodes::_aaload;
+        if (element_type != T_OBJECT) {
+          lose("primitive array NYI", CHECK_(empty));
+        }
+        int ap = arg_slot;
+        for (int i = 0; i < spread_length; i++) {
+          jvalue   offset_jvalue;  offset_jvalue.i = i;
+          ArgToken offset_arg = make_prim_constant(T_INT, &offset_jvalue, CHECK_(empty));
+          ArgToken element_arg = make_fetch(element_type, element_klass(), aload_op, array_arg, offset_arg, CHECK_(empty));
+          change_argument(T_VOID, ap, element_type, element_arg);
+          ap += type2size[element_type];
+        }
+        break;
+      }
+
+      case sun_dyn_AdapterMethodHandle::OP_FLYBY: //NYI, runs Java code
+      case sun_dyn_AdapterMethodHandle::OP_RICOCHET: //NYI, runs Java code
+        lose("unimplemented", CHECK_(empty));
+        break;
+
+      default:
+        lose("bad adapter conversion", CHECK_(empty));
+        break;
+      }
+    }
+
+    if (chain().is_bound()) {
+      // push a new argument
+      BasicType arg_type  = chain().bound_arg_type();
+      jint      arg_slot  = chain().bound_arg_slot();
+      oop       arg_oop   = chain().bound_arg_oop();
+      ArgToken  arg;
+      if (arg_type == T_OBJECT) {
+        arg = make_oop_constant(arg_oop, CHECK_(empty));
+      } else {
+        jvalue arg_value;
+        BasicType bt = java_lang_boxing_object::get_value(arg_oop, &arg_value);
+        if (bt == arg_type) {
+          arg = make_prim_constant(arg_type, &arg_value, CHECK_(empty));
+        } else {
+          lose("bad bound value", CHECK_(empty));
+        }
+      }
+      debug_only(arg_oop = badOop);
+      change_argument(T_VOID, arg_slot, arg_type, arg);
+    }
+
+    // this test must come after the body of the loop
+    if (!chain().is_last()) {
+      chain().next(CHECK_(empty));
+    } else {
+      break;
+    }
+  }
+
+  // finish the sequence with a tail-call to the ultimate target
+  // parameters are passed in logical order (recv 1st), not slot order
+  ArgToken* arglist = NEW_RESOURCE_ARRAY(ArgToken, _outgoing.length() + 1);
+  int ap = 0;
+  for (int i = _outgoing.length() - 1; i >= 0; i--) {
+    SlotState* arg_state = slot_state(i);
+    if (arg_state->_type == T_VOID)  continue;
+    arglist[ap++] = _outgoing.at(i)._arg;
+  }
+  assert(ap == _outgoing_argc, "");
+  arglist[ap] = ArgToken();  // add a sentinel, for the sake of asserts
+  return make_invoke(chain().last_method_oop(),
+                     vmIntrinsics::_none,
+                     chain().last_invoke_code(), true,
+                     ap, arglist, THREAD);
+}
+
+
+// -----------------------------------------------------------------------------
+// MethodHandleWalker::walk_incoming_state
+//
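+// Builds the initial _outgoing slot model from the root MethodType.  For
+// example, an incoming type (int,long)void yields _outgoing = { long, void,
+// int }: parameter 0 lands at the highest index and the long gets a T_VOID
+// companion slot, so the array mirrors the JVM stack layout.
+//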
+void MethodHandleWalker::walk_incoming_state(TRAPS) {
+  Handle mtype(THREAD, chain().method_type_oop());
+  int nptypes = java_dyn_MethodType::ptype_count(mtype());
+  _outgoing_argc = nptypes;
+  int argp = nptypes - 1;
+  if (argp >= 0) {
+    _outgoing.at_grow(argp, make_state(T_VOID, ArgToken(tt_void))); // presize
+  }
+  for (int i = 0; i < nptypes; i++) {
+    klassOop  arg_type_klass = NULL;
+    BasicType arg_type = java_lang_Class::as_BasicType(
+                java_dyn_MethodType::ptype(mtype(), i), &arg_type_klass);
+    int index = new_local_index(arg_type);
+    ArgToken arg = make_parameter(arg_type, arg_type_klass, index, CHECK);
+    debug_only(arg_type_klass = (klassOop) NULL);
+    _outgoing.at_put(argp, make_state(arg_type, arg));
+    if (type2size[arg_type] == 2) {
+      // add the extra slot, so we can model the JVM stack
+      _outgoing.insert_before(argp+1, make_state(T_VOID, ArgToken(tt_void)));
+    }
+    --argp;
+  }
+  // call make_parameter at the end of the list for the return type
+  klassOop  ret_type_klass = NULL;
+  BasicType ret_type = java_lang_Class::as_BasicType(
+              java_dyn_MethodType::rtype(mtype()), &ret_type_klass);
+  ArgToken  ret = make_parameter(ret_type, ret_type_klass, -1, CHECK);
+  // ignore ret; client can catch it if needed
+}
+
+
+// -----------------------------------------------------------------------------
+// MethodHandleWalker::change_argument
+//
+// This is messy because some kinds of arguments are paired with
+// companion slots containing an empty value.
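+// For example, turning an int (one slot) into a long (two slots) inserts a
+// T_VOID companion slot next to the value, and the reverse change deletes it,
+// so _outgoing keeps matching the JVM stack layout exactly.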
+void MethodHandleWalker::change_argument(BasicType old_type, int slot, BasicType new_type,
+                                         const ArgToken& new_arg) {
+  int old_size = type2size[old_type];
+  int new_size = type2size[new_type];
+  if (old_size == new_size) {
+    // simple case first
+    _outgoing.at_put(slot, make_state(new_type, new_arg));
+  } else if (old_size > new_size) {
+    for (int i = old_size - 1; i >= new_size; i--) {
+      assert((i != 0) == (_outgoing.at(slot + i)._type == T_VOID), "");
+      _outgoing.remove_at(slot + i);
+    }
+    if (new_size > 0)
+      _outgoing.at_put(slot, make_state(new_type, new_arg));
+    else
+      _outgoing_argc -= 1;      // deleted a real argument
+  } else {
+    for (int i = old_size; i < new_size; i++) {
+      _outgoing.insert_before(slot + i, make_state(T_VOID, ArgToken(tt_void)));
+    }
+    _outgoing.at_put(slot, make_state(new_type, new_arg));
+    if (old_size == 0)
+      _outgoing_argc += 1;      // inserted a real argument
+  }
+}
+
+
+#ifdef ASSERT
+int MethodHandleWalker::argument_count_slow() {
+  int args_seen = 0;
+  for (int i = _outgoing.length() - 1; i >= 0; i--) {
+    if (_outgoing.at(i)._type != T_VOID) {
+      ++args_seen;
+    }
+  }
+  return args_seen;
+}
+#endif
+
+
+// -----------------------------------------------------------------------------
+// MethodHandleCompiler
+
+MethodHandleCompiler::MethodHandleCompiler(Handle root, methodHandle callee, bool is_invokedynamic, TRAPS)
+  : MethodHandleWalker(root, is_invokedynamic, THREAD),
+    _callee(callee),
+    _thread(THREAD),
+    _bytecode(THREAD, 50),
+    _constants(THREAD, 10),
+    _cur_stack(0),
+    _max_stack(0),
+    _rtype(T_ILLEGAL)
+{
+
+  // Element zero is always the null constant.
+  (void) _constants.append(NULL);
+
+  // Set name and signature index.
+  _name_index      = cpool_symbol_put(_callee->name());
+  _signature_index = cpool_symbol_put(_callee->signature());
+
+  // Get return type klass.
+  Handle first_mtype(THREAD, chain().method_type_oop());
+  // _rklass is NULL for primitives.
+  _rtype = java_lang_Class::as_BasicType(java_dyn_MethodType::rtype(first_mtype()), &_rklass);
+  if (_rtype == T_ARRAY)  _rtype = T_OBJECT;
+
+  int params = _callee->size_of_parameters();  // Incoming arguments plus receiver.
+  _num_params = for_invokedynamic() ? params - 1 : params;  // XXX Check if callee is static?
+}
+
+
+// -----------------------------------------------------------------------------
+// MethodHandleCompiler::compile
+//
+// Compile this MethodHandle into a bytecode adapter and return a
+// methodOop.
+methodHandle MethodHandleCompiler::compile(TRAPS) {
+  assert(_thread == THREAD, "must be same thread");
+  methodHandle nullHandle;
+  (void) walk(CHECK_(nullHandle));
+  return get_method_oop(CHECK_(nullHandle));
+}
+
+
+void MethodHandleCompiler::emit_bc(Bytecodes::Code op, int index) {
+  Bytecodes::check(op);  // Are we legal?
+
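+  // The cases below are grouped by instruction format: "b" is a bare one-byte
+  // opcode, "bi" adds a one-byte index, and "bii"/"bjj" add a two-byte
+  // big-endian index.  E.g. emit_bc(Bytecodes::_checkcast, 0x0123) appends
+  // the bytes 0xc0, 0x01, 0x23.
+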
+  switch (op) {
+  // b
+  case Bytecodes::_aconst_null:
+  case Bytecodes::_iconst_m1:
+  case Bytecodes::_iconst_0:
+  case Bytecodes::_iconst_1:
+  case Bytecodes::_iconst_2:
+  case Bytecodes::_iconst_3:
+  case Bytecodes::_iconst_4:
+  case Bytecodes::_iconst_5:
+  case Bytecodes::_lconst_0:
+  case Bytecodes::_lconst_1:
+  case Bytecodes::_fconst_0:
+  case Bytecodes::_fconst_1:
+  case Bytecodes::_fconst_2:
+  case Bytecodes::_dconst_0:
+  case Bytecodes::_dconst_1:
+  case Bytecodes::_iload_0:
+  case Bytecodes::_iload_1:
+  case Bytecodes::_iload_2:
+  case Bytecodes::_iload_3:
+  case Bytecodes::_lload_0:
+  case Bytecodes::_lload_1:
+  case Bytecodes::_lload_2:
+  case Bytecodes::_lload_3:
+  case Bytecodes::_fload_0:
+  case Bytecodes::_fload_1:
+  case Bytecodes::_fload_2:
+  case Bytecodes::_fload_3:
+  case Bytecodes::_dload_0:
+  case Bytecodes::_dload_1:
+  case Bytecodes::_dload_2:
+  case Bytecodes::_dload_3:
+  case Bytecodes::_aload_0:
+  case Bytecodes::_aload_1:
+  case Bytecodes::_aload_2:
+  case Bytecodes::_aload_3:
+  case Bytecodes::_istore_0:
+  case Bytecodes::_istore_1:
+  case Bytecodes::_istore_2:
+  case Bytecodes::_istore_3:
+  case Bytecodes::_lstore_0:
+  case Bytecodes::_lstore_1:
+  case Bytecodes::_lstore_2:
+  case Bytecodes::_lstore_3:
+  case Bytecodes::_fstore_0:
+  case Bytecodes::_fstore_1:
+  case Bytecodes::_fstore_2:
+  case Bytecodes::_fstore_3:
+  case Bytecodes::_dstore_0:
+  case Bytecodes::_dstore_1:
+  case Bytecodes::_dstore_2:
+  case Bytecodes::_dstore_3:
+  case Bytecodes::_astore_0:
+  case Bytecodes::_astore_1:
+  case Bytecodes::_astore_2:
+  case Bytecodes::_astore_3:
+  case Bytecodes::_i2l:
+  case Bytecodes::_i2f:
+  case Bytecodes::_i2d:
+  case Bytecodes::_i2b:
+  case Bytecodes::_i2c:
+  case Bytecodes::_i2s:
+  case Bytecodes::_l2i:
+  case Bytecodes::_l2f:
+  case Bytecodes::_l2d:
+  case Bytecodes::_f2i:
+  case Bytecodes::_f2l:
+  case Bytecodes::_f2d:
+  case Bytecodes::_d2i:
+  case Bytecodes::_d2l:
+  case Bytecodes::_d2f:
+  case Bytecodes::_ireturn:
+  case Bytecodes::_lreturn:
+  case Bytecodes::_freturn:
+  case Bytecodes::_dreturn:
+  case Bytecodes::_areturn:
+  case Bytecodes::_return:
+    assert(strcmp(Bytecodes::format(op), "b") == 0, "wrong bytecode format");
+    _bytecode.push(op);
+    break;
+
+  // bi
+  case Bytecodes::_ldc:
+  case Bytecodes::_iload:
+  case Bytecodes::_lload:
+  case Bytecodes::_fload:
+  case Bytecodes::_dload:
+  case Bytecodes::_aload:
+  case Bytecodes::_istore:
+  case Bytecodes::_lstore:
+  case Bytecodes::_fstore:
+  case Bytecodes::_dstore:
+  case Bytecodes::_astore:
+    assert(strcmp(Bytecodes::format(op), "bi") == 0, "wrong bytecode format");
+    assert((char) index == index, "index does not fit in 8 bits");
+    _bytecode.push(op);
+    _bytecode.push(index);
+    break;
+
+  // bii
+  case Bytecodes::_ldc2_w:
+  case Bytecodes::_checkcast:
+    assert(strcmp(Bytecodes::format(op), "bii") == 0, "wrong bytecode format");
+    assert((short) index == index, "index does not fit in 16 bits");
+    _bytecode.push(op);
+    _bytecode.push(index >> 8);
+    _bytecode.push(index);
+    break;
+
+  // bjj
+  case Bytecodes::_invokestatic:
+  case Bytecodes::_invokespecial:
+  case Bytecodes::_invokevirtual:
+    assert(strcmp(Bytecodes::format(op), "bjj") == 0, "wrong bytecode format");
+    assert((short) index == index, "index does not fit in 16 bits");
+    _bytecode.push(op);
+    _bytecode.push(index >> 8);
+    _bytecode.push(index);
+    break;
+
+  default:
+    ShouldNotReachHere();
+  }
+}
+
+
+void MethodHandleCompiler::emit_load(BasicType bt, int index) {
+  if (index <= 3) {
+    switch (bt) {
+    case T_BOOLEAN: case T_BYTE: case T_CHAR: case T_SHORT:
+    case T_INT:    emit_bc(Bytecodes::cast(Bytecodes::_iload_0 + index)); break;
+    case T_LONG:   emit_bc(Bytecodes::cast(Bytecodes::_lload_0 + index)); break;
+    case T_FLOAT:  emit_bc(Bytecodes::cast(Bytecodes::_fload_0 + index)); break;
+    case T_DOUBLE: emit_bc(Bytecodes::cast(Bytecodes::_dload_0 + index)); break;
+    case T_OBJECT: emit_bc(Bytecodes::cast(Bytecodes::_aload_0 + index)); break;
+    default:
+      ShouldNotReachHere();
+    }
+  }
+  else {
+    switch (bt) {
+    case T_BOOLEAN: case T_BYTE: case T_CHAR: case T_SHORT:
+    case T_INT:    emit_bc(Bytecodes::_iload, index); break;
+    case T_LONG:   emit_bc(Bytecodes::_lload, index); break;
+    case T_FLOAT:  emit_bc(Bytecodes::_fload, index); break;
+    case T_DOUBLE: emit_bc(Bytecodes::_dload, index); break;
+    case T_OBJECT: emit_bc(Bytecodes::_aload, index); break;
+    default:
+      ShouldNotReachHere();
+    }
+  }
+  stack_push(bt);
+}
+
+void MethodHandleCompiler::emit_store(BasicType bt, int index) {
+  if (index <= 3) {
+    switch (bt) {
+    case T_BOOLEAN: case T_BYTE: case T_CHAR: case T_SHORT:
+    case T_INT:    emit_bc(Bytecodes::cast(Bytecodes::_istore_0 + index)); break;
+    case T_LONG:   emit_bc(Bytecodes::cast(Bytecodes::_lstore_0 + index)); break;
+    case T_FLOAT:  emit_bc(Bytecodes::cast(Bytecodes::_fstore_0 + index)); break;
+    case T_DOUBLE: emit_bc(Bytecodes::cast(Bytecodes::_dstore_0 + index)); break;
+    case T_OBJECT: emit_bc(Bytecodes::cast(Bytecodes::_astore_0 + index)); break;
+    default:
+      ShouldNotReachHere();
+    }
+  }
+  else {
+    switch (bt) {
+    case T_BOOLEAN: case T_BYTE: case T_CHAR: case T_SHORT:
+    case T_INT:    emit_bc(Bytecodes::_istore, index); break;
+    case T_LONG:   emit_bc(Bytecodes::_lstore, index); break;
+    case T_FLOAT:  emit_bc(Bytecodes::_fstore, index); break;
+    case T_DOUBLE: emit_bc(Bytecodes::_dstore, index); break;
+    case T_OBJECT: emit_bc(Bytecodes::_astore, index); break;
+    default:
+      ShouldNotReachHere();
+    }
+  }
+  stack_pop(bt);
+}
+
+
+void MethodHandleCompiler::emit_load_constant(ArgToken arg) {
+  BasicType bt = arg.basic_type();
+  switch (bt) {
+  case T_INT: {
+    jint value = arg.get_jint();
+    if (-1 <= value && value <= 5)
+      emit_bc(Bytecodes::cast(Bytecodes::_iconst_0 + value));
+    else
+      emit_bc(Bytecodes::_ldc, cpool_int_put(value));
+    break;
+  }
+  case T_LONG: {
+    jlong value = arg.get_jlong();
+    if (0 <= value && value <= 1)
+      emit_bc(Bytecodes::cast(Bytecodes::_lconst_0 + (int) value));
+    else
+      emit_bc(Bytecodes::_ldc2_w, cpool_long_put(value));
+    break;
+  }
+  case T_FLOAT: {
+    jfloat value  = arg.get_jfloat();
+    if (value == 0.0 || value == 1.0 || value == 2.0)
+      emit_bc(Bytecodes::cast(Bytecodes::_fconst_0 + (int) value));
+    else
+      emit_bc(Bytecodes::_ldc, cpool_float_put(value));
+    break;
+  }
+  case T_DOUBLE: {
+    jdouble value = arg.get_jdouble();
+    if (value == 0.0 || value == 1.0)
+      emit_bc(Bytecodes::cast(Bytecodes::_dconst_0 + (int) value));
+    else
+      emit_bc(Bytecodes::_ldc2_w, cpool_double_put(value));
+    break;
+  }
+  case T_OBJECT: {
+    Handle value = arg.object();
+    if (value.is_null())
+      emit_bc(Bytecodes::_aconst_null);
+    else
+      emit_bc(Bytecodes::_ldc, cpool_object_put(value));
+    break;
+  }
+  default:
+    ShouldNotReachHere();
+  }
+  stack_push(bt);
+}
+
+
+MethodHandleWalker::ArgToken
+MethodHandleCompiler::make_conversion(BasicType type, klassOop tk, Bytecodes::Code op,
+                                      const ArgToken& src, TRAPS) {
+
+  BasicType srctype = src.basic_type();
+  int index = src.index();
+
+  switch (op) {
+  case Bytecodes::_i2l:
+  case Bytecodes::_i2f:
+  case Bytecodes::_i2d:
+  case Bytecodes::_i2b:
+  case Bytecodes::_i2c:
+  case Bytecodes::_i2s:
+
+  case Bytecodes::_l2i:
+  case Bytecodes::_l2f:
+  case Bytecodes::_l2d:
+
+  case Bytecodes::_f2i:
+  case Bytecodes::_f2l:
+  case Bytecodes::_f2d:
+
+  case Bytecodes::_d2i:
+  case Bytecodes::_d2l:
+  case Bytecodes::_d2f:
+    emit_load(srctype, index);
+    stack_pop(srctype);  // pop the src type
+    emit_bc(op);
+    stack_push(type);    // push the dest value
+    if (srctype != type)
+      index = new_local_index(type);
+    emit_store(type, index);
+    break;
+
+  case Bytecodes::_checkcast:
+    emit_load(srctype, index);
+    emit_bc(op, cpool_klass_put(tk));
+    emit_store(srctype, index);
+    break;
+
+  default:
+    ShouldNotReachHere();
+  }
+
+  return make_parameter(type, tk, index, THREAD);
+}
+
+
+// -----------------------------------------------------------------------------
+// MethodHandleCompiler::make_invoke
+//
+
+static jvalue zero_jvalue;
+
+// Emit bytecodes for the given invoke instruction.
+MethodHandleWalker::ArgToken
+MethodHandleCompiler::make_invoke(methodOop m, vmIntrinsics::ID iid,
+                                  Bytecodes::Code op, bool tailcall,
+                                  int argc, MethodHandleWalker::ArgToken* argv,
+                                  TRAPS) {
+  if (m == NULL) {
+    // Get the intrinsic methodOop.
+    m = vmIntrinsics::method_for(iid);
+  }
+
+  klassOop  klass     = m->method_holder();
+  symbolOop name      = m->name();
+  symbolOop signature = m->signature();
+
+  if (tailcall) {
+    // Actually, in order to make these methods more recognizable,
+    // let's put them in holder classes MethodHandle and InvokeDynamic.
+    // That way stack walkers and compiler heuristics can recognize them.
+    _target_klass = (for_invokedynamic()
+                     ? SystemDictionary::InvokeDynamic_klass()
+                     : SystemDictionary::MethodHandle_klass());
+  }
+
+  // instanceKlass* ik = instanceKlass::cast(klass);
+  // tty->print_cr("MethodHandleCompiler::make_invoke: %s %s.%s%s", Bytecodes::name(op), ik->external_name(), name->as_C_string(), signature->as_C_string());
+
+  // Set the carry bit of the invocation counter to force inlining of the callee.
+  InvocationCounter* ic = m->invocation_counter();
+  ic->set_carry();
+
+  for (int i = 0; i < argc; i++) {
+    ArgToken arg = argv[i];
+    TokenType tt = arg.token_type();
+    BasicType bt = arg.basic_type();
+
+    switch (tt) {
+    case tt_parameter:
+    case tt_temporary:
+      emit_load(bt, arg.index());
+      break;
+    case tt_constant:
+      emit_load_constant(arg);
+      break;
+    case tt_illegal:
+      // Sentinel.
+      assert(i == (argc - 1), "sentinel must be last entry");
+      break;
+    case tt_void:
+    default:
+      ShouldNotReachHere();
+    }
+  }
+
+  // Populate constant pool.
+  int name_index          = cpool_symbol_put(name);
+  int signature_index     = cpool_symbol_put(signature);
+  int name_and_type_index = cpool_name_and_type_put(name_index, signature_index);
+  int klass_index         = cpool_klass_put(klass);
+  int methodref_index     = cpool_methodref_put(klass_index, name_and_type_index);
+
+  // Generate invoke.
+  switch (op) {
+  case Bytecodes::_invokestatic:
+  case Bytecodes::_invokespecial:
+  case Bytecodes::_invokevirtual:
+    emit_bc(op, methodref_index);
+    break;
+  case Bytecodes::_invokeinterface:
+    Unimplemented();
+    break;
+  default:
+    ShouldNotReachHere();
+  }
+
+  // If tailcall, we have walked all the way to a direct method handle.
+  // Otherwise, make a recursive call to some helper routine.
+  BasicType rbt = m->result_type();
+  if (rbt == T_ARRAY)  rbt = T_OBJECT;
+  ArgToken ret;
+  if (tailcall) {
+    if (rbt != _rtype) {
+      if (rbt == T_VOID) {
+        // push a zero of the right sort
+        ArgToken zero;
+        if (_rtype == T_OBJECT) {
+          zero = make_oop_constant(NULL, CHECK_(zero));
+        } else {
+          zero = make_prim_constant(_rtype, &zero_jvalue, CHECK_(zero));
+        }
+        emit_load_constant(zero);
+      } else if (_rtype == T_VOID) {
+        // We'll emit a _return with something on the stack.
+        // It's OK to ignore what's on the stack.
+      } else {
+        tty->print_cr("*** rbt=%d != rtype=%d", rbt, _rtype);
+        assert(false, "IMPLEMENT ME");
+      }
+    }
+    switch (_rtype) {
+    case T_BOOLEAN: case T_BYTE: case T_CHAR: case T_SHORT:
+    case T_INT:    emit_bc(Bytecodes::_ireturn); break;
+    case T_LONG:   emit_bc(Bytecodes::_lreturn); break;
+    case T_FLOAT:  emit_bc(Bytecodes::_freturn); break;
+    case T_DOUBLE: emit_bc(Bytecodes::_dreturn); break;
+    case T_VOID:   emit_bc(Bytecodes::_return);  break;
+    case T_OBJECT:
+      if (_rklass.not_null() && _rklass() != SystemDictionary::Object_klass())
+        emit_bc(Bytecodes::_checkcast, cpool_klass_put(_rklass()));
+      emit_bc(Bytecodes::_areturn);
+      break;
+    default: ShouldNotReachHere();
+    }
+    ret = ArgToken();  // Dummy return value.
+  }
+  else {
+    stack_push(rbt);  // The return value is already pushed onto the stack.
+    int index = new_local_index(rbt);
+    switch (rbt) {
+    case T_BOOLEAN: case T_BYTE: case T_CHAR:  case T_SHORT:
+    case T_INT:     case T_LONG: case T_FLOAT: case T_DOUBLE:
+    case T_OBJECT:
+      emit_store(rbt, index);
+      ret = ArgToken(tt_temporary, rbt, index);
+      break;
+    case T_VOID:
+      ret = ArgToken(tt_void);
+      break;
+    default:
+      ShouldNotReachHere();
+    }
+  }
+
+  return ret;
+}
+
+MethodHandleWalker::ArgToken
+MethodHandleCompiler::make_fetch(BasicType type, klassOop tk, Bytecodes::Code op,
+                                 const MethodHandleWalker::ArgToken& base,
+                                 const MethodHandleWalker::ArgToken& offset,
+                                 TRAPS) {
+  Unimplemented();
+  return ArgToken();
+}
+
+
+int MethodHandleCompiler::cpool_primitive_put(BasicType bt, jvalue* con) {
+  jvalue con_copy;
+  assert(bt < T_OBJECT, "");
+  if (type2aelembytes(bt) < jintSize) {
+    // widen to int
+    con_copy = (*con);
+    con = &con_copy;
+    switch (bt) {
+    case T_BOOLEAN: con->i = (con->z ? 1 : 0); break;
+    case T_BYTE:    con->i = con->b;           break;
+    case T_CHAR:    con->i = con->c;           break;
+    case T_SHORT:   con->i = con->s;           break;
+    default: ShouldNotReachHere();
+    }
+    bt = T_INT;
+  }
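+  // (The class-file constant pool has no tags for subword primitives, so a
+  // T_SHORT constant, for example, is stored as a JVM_CONSTANT_Integer.)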
+
+//   for (int i = 1, imax = _constants.length(); i < imax; i++) {
+//     ConstantValue* con = _constants.at(i);
+//     if (con != NULL && con->is_primitive() && con->_type == bt) {
+//       bool match = false;
+//       switch (type2size[bt]) {
+//       case 1:  if (pcon->_value.i == con->i)  match = true;  break;
+//       case 2:  if (pcon->_value.j == con->j)  match = true;  break;
+//       }
+//       if (match)
+//         return i;
+//     }
+//   }
+  ConstantValue* cv = new ConstantValue(bt, *con);
+  int index = _constants.append(cv);
+
+  // long and double entries take 2 slots, we add another empty entry.
+  if (type2size[bt] == 2)
+    (void) _constants.append(NULL);
+
+  return index;
+}
+
+
+constantPoolHandle MethodHandleCompiler::get_constant_pool(TRAPS) const {
+  constantPoolHandle nullHandle;
+  bool is_conc_safe = true;
+  constantPoolOop cpool_oop = oopFactory::new_constantPool(_constants.length(), is_conc_safe, CHECK_(nullHandle));
+  constantPoolHandle cpool(THREAD, cpool_oop);
+
+  // Fill the real constant pool skipping the zero element.
+  for (int i = 1; i < _constants.length(); i++) {
+    ConstantValue* cv = _constants.at(i);
+    switch (cv->tag()) {
+    case JVM_CONSTANT_Utf8:        cpool->symbol_at_put(       i, cv->symbol_oop()                     ); break;
+    case JVM_CONSTANT_Integer:     cpool->int_at_put(          i, cv->get_jint()                       ); break;
+    case JVM_CONSTANT_Float:       cpool->float_at_put(        i, cv->get_jfloat()                     ); break;
+    case JVM_CONSTANT_Long:        cpool->long_at_put(         i, cv->get_jlong()                      ); break;
+    case JVM_CONSTANT_Double:      cpool->double_at_put(       i, cv->get_jdouble()                    ); break;
+    case JVM_CONSTANT_Class:       cpool->klass_at_put(        i, cv->klass_oop()                      ); break;
+    case JVM_CONSTANT_Methodref:   cpool->method_at_put(       i, cv->first_index(), cv->second_index()); break;
+    case JVM_CONSTANT_NameAndType: cpool->name_and_type_at_put(i, cv->first_index(), cv->second_index()); break;
+    case JVM_CONSTANT_Object:      cpool->object_at_put(       i, cv->object_oop()                     ); break;
+    default: ShouldNotReachHere();
+    }
+
+    switch (cv->tag()) {
+    case JVM_CONSTANT_Long:
+    case JVM_CONSTANT_Double:
+      i++;  // Skip empty entry.
+      assert(_constants.at(i) == NULL, "empty entry");
+      break;
+    }
+  }
+
+  // Set the constant pool holder to the target method's class.
+  cpool->set_pool_holder(_target_klass());
+
+  return cpool;
+}
+
+
+methodHandle MethodHandleCompiler::get_method_oop(TRAPS) const {
+  methodHandle nullHandle;
+  // Create a method that holds the generated bytecode.  invokedynamic
+  // has no receiver; normal MH calls do.
+  int flags_bits;
+  if (for_invokedynamic())
+    flags_bits = (/*JVM_MH_INVOKE_BITS |*/ JVM_ACC_PUBLIC | JVM_ACC_FINAL | JVM_ACC_STATIC);
+  else
+    flags_bits = (/*JVM_MH_INVOKE_BITS |*/ JVM_ACC_PUBLIC | JVM_ACC_FINAL);
+
+  bool is_conc_safe = true;
+  methodOop m_oop = oopFactory::new_method(bytecode_length(),
+                                           accessFlags_from(flags_bits),
+                                           0, 0, 0, is_conc_safe, CHECK_(nullHandle));
+  methodHandle m(THREAD, m_oop);
+  m_oop = NULL;  // oop not GC safe
+
+  constantPoolHandle cpool = get_constant_pool(CHECK_(nullHandle));
+  m->set_constants(cpool());
+
+  m->set_name_index(_name_index);
+  m->set_signature_index(_signature_index);
+
+  m->set_code((address) bytecode());
+
+  m->set_max_stack(_max_stack);
+  m->set_max_locals(max_locals());
+  m->set_size_of_parameters(_num_params);
+
+  typeArrayHandle exception_handlers(THREAD, Universe::the_empty_int_array());
+  m->set_exception_table(exception_handlers());
+
+  // Set the carry bit of the invocation counter to force inlining of
+  // the adapter.
+  InvocationCounter* ic = m->invocation_counter();
+  ic->set_carry();
+
+  // Rewrite the method and set up the constant pool cache.
+  objArrayOop m_array = oopFactory::new_system_objArray(1, CHECK_(nullHandle));
+  objArrayHandle methods(THREAD, m_array);
+  methods->obj_at_put(0, m());
+  Rewriter::rewrite(_target_klass(), cpool, methods, CHECK_(nullHandle));  // Use fake class.
+
+#ifndef PRODUCT
+  if (TraceMethodHandles) {
+    m->print();
+    m->print_codes();
+  }
+#endif //PRODUCT
+
+  return m;
+}
+
+
+#ifndef PRODUCT
+
+#if 0
+// MH printer for debugging.
+
+class MethodHandlePrinter : public MethodHandleWalker {
+private:
+  outputStream* _out;
+  bool          _verbose;
+  int           _temp_num;
+  stringStream  _strbuf;
+  const char* strbuf() {
+    const char* s = _strbuf.as_string();
+    _strbuf.reset();
+    return s;
+  }
+  ArgToken token(const char* str) {
+    return (ArgToken) str;
+  }
+  void start_params() {
+    _out->print("(");
+  }
+  void end_params() {
+    if (_verbose)  _out->print("\n");
+    _out->print(") => {");
+  }
+  void put_type_name(BasicType type, klassOop tk, outputStream* s) {
+    const char* kname = NULL;
+    if (tk != NULL)
+      kname = Klass::cast(tk)->external_name();
+    s->print("%s", (kname != NULL) ? kname : type2name(type));
+  }
+  ArgToken maybe_make_temp(const char* statement_op, BasicType type, const char* temp_name) {
+    const char* value = strbuf();
+    if (!_verbose)  return token(value);
+    // make an explicit binding for each separate value
+    _strbuf.print("%s%d", temp_name, ++_temp_num);
+    const char* temp = strbuf();
+    _out->print("\n  %s %s %s = %s;", statement_op, type2name(type), temp, value);
+    return token(temp);
+  }
+
+public:
+  MethodHandlePrinter(Handle root, bool verbose, outputStream* out, TRAPS)
+    : MethodHandleWalker(root, THREAD),
+      _out(out),
+      _verbose(verbose),
+      _temp_num(0)
+  {
+    start_params();
+  }
+  virtual ArgToken make_parameter(BasicType type, klassOop tk, int argnum, TRAPS) {
+    if (argnum < 0) {
+      end_params();
+      return NULL;
+    }
+    if (argnum == 0) {
+      _out->print(_verbose ? "\n  " : "");
+    } else {
+      _out->print(_verbose ? ",\n  " : ", ");
+    }
+    if (argnum >= _temp_num)
+      _temp_num = argnum;
+    // generate an argument name
+    _strbuf.print("a%d", argnum);
+    const char* arg = strbuf();
+    put_type_name(type, tk, _out);
+    _out->print(" %s", arg);
+    return token(arg);
+  }
+  virtual ArgToken make_oop_constant(oop con, TRAPS) {
+    if (con == NULL)
+      _strbuf.print("null");
+    else
+      con->print_value_on(&_strbuf);
+    if (_strbuf.size() == 0) {  // yuck
+      _strbuf.print("(a ");
+      put_type_name(T_OBJECT, con->klass(), &_strbuf);
+      _strbuf.print(")");
+    }
+    return maybe_make_temp("constant", T_OBJECT, "k");
+  }
+  virtual ArgToken make_prim_constant(BasicType type, jvalue* con, TRAPS) {
+    java_lang_boxing_object::print(type, con, &_strbuf);
+    return maybe_make_temp("constant", type, "k");
+  }
+  virtual ArgToken make_conversion(BasicType type, klassOop tk, Bytecodes::Code op, ArgToken src, TRAPS) {
+    _strbuf.print("%s(%s", Bytecodes::name(op), (const char*)src);
+    if (tk != NULL) {
+      _strbuf.print(", ");
+      put_type_name(type, tk, &_strbuf);
+    }
+    _strbuf.print(")");
+    return maybe_make_temp("convert", type, "v");
+  }
+  virtual ArgToken make_fetch(BasicType type, klassOop tk, Bytecodes::Code op, ArgToken base, ArgToken offset, TRAPS) {
+    _strbuf.print("%s(%s, %s", Bytecodes::name(op), (const char*)base, (const char*)offset);
+    if (tk != NULL) {
+      _strbuf.print(", ");
+      put_type_name(type, tk, &_strbuf);
+    }
+    _strbuf.print(")");
+    return maybe_make_temp("fetch", type, "x");
+  }
+  virtual ArgToken make_invoke(methodOop m, vmIntrinsics::ID iid,
+                               Bytecodes::Code op, bool tailcall,
+                               int argc, ArgToken* argv, TRAPS) {
+    symbolOop name, sig;
+    if (m != NULL) {
+      name = m->name();
+      sig  = m->signature();
+    } else {
+      name = vmSymbols::symbol_at(vmIntrinsics::name_for(iid));
+      sig  = vmSymbols::symbol_at(vmIntrinsics::signature_for(iid));
+    }
+    _strbuf.print("%s %s%s(", Bytecodes::name(op), name->as_C_string(), sig->as_C_string());
+    for (int i = 0; i < argc; i++) {
+      _strbuf.print("%s%s", (i > 0 ? ", " : ""), (const char*)argv[i]);
+    }
+    _strbuf.print(")");
+    if (!tailcall) {
+      BasicType rt = char2type(sig->byte_at(sig->utf8_length()-1));
+      if (rt == T_ILLEGAL)  rt = T_OBJECT;  // ';' at the end of '(...)L...;'
+      return maybe_make_temp("invoke", rt, "x");
+    } else {
+      const char* ret = strbuf();
+      _out->print(_verbose ? "\n  return " : " ");
+      _out->print("%s", ret);
+      _out->print(_verbose ? "\n}\n" : " }");
+    }
+    return ArgToken();
+  }
+
+  virtual void set_method_handle(oop mh) {
+    if (WizardMode && Verbose) {
+      tty->print("\n--- next target: ");
+      mh->print();
+    }
+  }
+
+  static void print(Handle root, bool verbose, outputStream* out, TRAPS) {
+    ResourceMark rm;
+    MethodHandlePrinter printer(root, verbose, out, CHECK);
+    printer.walk(CHECK);
+    out->print("\n");
+  }
+  static void print(Handle root, bool verbose = Verbose, outputStream* out = tty) {
+    EXCEPTION_MARK;
+    ResourceMark rm;
+    MethodHandlePrinter printer(root, verbose, out, THREAD);
+    if (!HAS_PENDING_EXCEPTION)
+      printer.walk(THREAD);
+    if (HAS_PENDING_EXCEPTION) {
+      oop ex = PENDING_EXCEPTION;
+      CLEAR_PENDING_EXCEPTION;
+      out->print("\n*** ");
+      if (ex != Universe::virtual_machine_error_instance())
+        ex->print_on(out);
+      else
+        out->print("lose: %s", printer.lose_message());
+      out->print("\n}\n");
+    }
+    out->print("\n");
+  }
+};
+#endif // 0
+
+extern "C"
+void print_method_handle(oop mh) {
+  if (java_dyn_MethodHandle::is_instance(mh)) {
+    //MethodHandlePrinter::print(mh);
+  } else {
+    tty->print("*** not a method handle: ");
+    mh->print();
+  }
+}
+
+#endif // PRODUCT
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/prims/methodHandleWalk.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -0,0 +1,413 @@
+/*
+ * Copyright 2008-2010 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// Low-level parser for method handle chains.
+class MethodHandleChain : StackObj {
+public:
+  typedef MethodHandles::EntryKind EntryKind;
+
+private:
+  Handle        _root;          // original target
+  Handle        _method_handle; // current target
+  bool          _is_last;       // final guy in chain
+  bool          _is_bound;      // has a bound argument
+  BasicType     _arg_type;      // if is_bound, the bound argument type
+  int           _arg_slot;      // if is_bound or is_adapter, affected argument slot
+  jint          _conversion;    // conversion field of AMH or -1
+  methodHandle  _last_method;   // if is_last, which method we target
+  Bytecodes::Code _last_invoke; // if is_last, type of invoke
+  const char*   _lose_message;  // saved argument to lose()
+
+  void set_method_handle(Handle target, TRAPS);
+  void set_last_method(oop target, TRAPS);
+  static BasicType compute_bound_arg_type(oop target, methodOop m, int arg_slot, TRAPS);
+
+  oop MethodHandle_type_oop()     { return java_dyn_MethodHandle::type(method_handle_oop()); }
+  oop MethodHandle_vmtarget_oop() { return java_dyn_MethodHandle::vmtarget(method_handle_oop()); }
+  int MethodHandle_vmslots()      { return java_dyn_MethodHandle::vmslots(method_handle_oop()); }
+  int DirectMethodHandle_vmindex()     { return sun_dyn_DirectMethodHandle::vmindex(method_handle_oop()); }
+  oop BoundMethodHandle_argument_oop() { return sun_dyn_BoundMethodHandle::argument(method_handle_oop()); }
+  int BoundMethodHandle_vmargslot()    { return sun_dyn_BoundMethodHandle::vmargslot(method_handle_oop()); }
+  int AdapterMethodHandle_conversion() { return sun_dyn_AdapterMethodHandle::conversion(method_handle_oop()); }
+
+public:
+  MethodHandleChain(Handle root, TRAPS)
+    : _root(root)
+  { set_method_handle(root, THREAD); }
+
+  bool is_adapter()             { return _conversion != -1; }
+  bool is_bound()               { return _is_bound; }
+  bool is_last()                { return _is_last; }
+
+  void next(TRAPS) {
+    assert(!is_last(), "");
+    set_method_handle(MethodHandle_vmtarget_oop(), THREAD);
+  }
+
+  Handle method_handle()        { return _method_handle; }
+  oop    method_handle_oop()    { return _method_handle(); }
+  oop    method_type_oop()      { return MethodHandle_type_oop(); }
+  oop    vmtarget_oop()         { return MethodHandle_vmtarget_oop(); }
+
+  jint adapter_conversion()     { assert(is_adapter(), ""); return _conversion; }
+  int  adapter_conversion_op()  { return MethodHandles::adapter_conversion_op(adapter_conversion()); }
+  BasicType adapter_conversion_src_type()
+                                { return MethodHandles::adapter_conversion_src_type(adapter_conversion()); }
+  BasicType adapter_conversion_dest_type()
+                                { return MethodHandles::adapter_conversion_dest_type(adapter_conversion()); }
+  int  adapter_conversion_stack_move()
+                                { return MethodHandles::adapter_conversion_stack_move(adapter_conversion()); }
+  int  adapter_conversion_stack_pushes()
+                                { return adapter_conversion_stack_move() / MethodHandles::stack_move_unit(); }
+  int  adapter_conversion_vminfo()
+                                { return MethodHandles::adapter_conversion_vminfo(adapter_conversion()); }
+  int adapter_arg_slot()        { assert(is_adapter(), ""); return _arg_slot; }
+  oop adapter_arg_oop()         { assert(is_adapter(), ""); return BoundMethodHandle_argument_oop(); }
+
+  BasicType bound_arg_type()    { assert(is_bound(), ""); return _arg_type; }
+  int       bound_arg_slot()    { assert(is_bound(), ""); return _arg_slot; }
+  oop       bound_arg_oop()     { assert(is_bound(), ""); return BoundMethodHandle_argument_oop(); }
+
+  methodOop last_method_oop()   { assert(is_last(), ""); return _last_method(); }
+  Bytecodes::Code last_invoke_code() { assert(is_last(), ""); return _last_invoke; }
+
+  void lose(const char* msg, TRAPS);
+  const char* lose_message()    { return _lose_message; }
+};
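+
+// A typical traversal, mirroring the loop in MethodHandleWalker::walk
+// (sketch only; error paths elided):
+//
+//   MethodHandleChain chain(root, CHECK);
+//   for (;;) {
+//     if (chain.is_adapter()) { /* inspect chain.adapter_conversion_op() */ }
+//     if (chain.is_bound())   { /* inspect chain.bound_arg_oop() */ }
+//     if (chain.is_last())  break;
+//     chain.next(CHECK);
+//   }
+//   methodOop target = chain.last_method_oop();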
+
+
+// Structure walker for method handles.
+// Does abstract interpretation on top of low-level parsing.
+// You supply the tokens shuffled by the abstract interpretation.
+class MethodHandleWalker : StackObj {
+public:
+  // Stack values:
+  enum TokenType {
+    tt_void,
+    tt_parameter,
+    tt_temporary,
+    tt_constant,
+    tt_illegal
+  };
+
+  // Argument token:
+  class ArgToken {
+  private:
+    TokenType _tt;
+    BasicType _bt;
+    jvalue    _value;
+    Handle    _handle;
+
+  public:
+    ArgToken(TokenType tt = tt_illegal) : _tt(tt) {}
+    ArgToken(TokenType tt, BasicType bt, jvalue value) : _tt(tt), _bt(bt), _value(value) {}
+
+    ArgToken(TokenType tt, BasicType bt, int index) : _tt(tt), _bt(bt) {
+      _value.i = index;
+    }
+
+    ArgToken(TokenType tt, BasicType bt, Handle value) : _tt(tt), _bt(bt) {
+      _handle = value;
+    }
+
+    TokenType token_type()  const { return _tt; }
+    BasicType basic_type()  const { return _bt; }
+    int       index()       const { return _value.i; }
+    Handle    object()      const { return _handle; }
+
+    jint      get_jint()    const { return _value.i; }
+    jlong     get_jlong()   const { return _value.j; }
+    jfloat    get_jfloat()  const { return _value.f; }
+    jdouble   get_jdouble() const { return _value.d; }
+  };
+
+  // Abstract interpretation state:
+  struct SlotState {
+    BasicType _type;
+    ArgToken  _arg;
+    SlotState() : _type(), _arg() {}
+  };
+  static SlotState make_state(BasicType type, ArgToken arg) {
+    SlotState ss;
+    ss._type = type; ss._arg = arg;
+    return ss;
+  }
+
+private:
+  MethodHandleChain _chain;
+  bool              _for_invokedynamic;
+  int               _local_index;
+
+  GrowableArray<SlotState> _outgoing;       // current outgoing parameter slots
+  int                      _outgoing_argc;  // # non-empty outgoing slots
+
+  // Replace a value of type old_type at slot (and maybe slot+1) with the new value.
+  // If old_type != T_VOID, remove the old argument at that point.
+  // If new_type != T_VOID, insert the new argument at that point.
+  // Insert or delete a second empty slot as needed.
+  void change_argument(BasicType old_type, int slot, BasicType new_type, const ArgToken& new_arg);
+
+  SlotState* slot_state(int slot) {
+    if (slot < 0 || slot >= _outgoing.length())
+      return NULL;
+    return _outgoing.adr_at(slot);
+  }
+  BasicType slot_type(int slot) {
+    SlotState* ss = slot_state(slot);
+    if (ss == NULL)
+      return T_ILLEGAL;
+    return ss->_type;
+  }
+  bool slot_has_argument(int slot) {
+    return slot_type(slot) < T_VOID;
+  }
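+  // (All concrete value types precede T_VOID in the BasicType enum, so the
+  // comparison in slot_has_argument doubles as a "real argument" test.)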
+
+#ifdef ASSERT
+  int argument_count_slow();
+#endif
+
+  // Return a bytecode for converting src to dest, if one exists.
+  Bytecodes::Code conversion_code(BasicType src, BasicType dest);
+
+  void walk_incoming_state(TRAPS);
+
+public:
+  MethodHandleWalker(Handle root, bool for_invokedynamic, TRAPS)
+    : _chain(root, THREAD),
+      _for_invokedynamic(for_invokedynamic),
+      _outgoing(THREAD, 10),
+      _outgoing_argc(0)
+  {
+    _local_index = for_invokedynamic ? 0 : 1;
+  }
+
+  MethodHandleChain& chain() { return _chain; }
+
+  bool for_invokedynamic() const { return _for_invokedynamic; }
+
+  int new_local_index(BasicType bt) {
+    //int index = _for_invokedynamic ? _local_index : _local_index - 1;
+    int index = _local_index;
+    _local_index += type2size[bt];
+    return index;
+  }
+
+  int max_locals() const { return _local_index; }
+
+  // plug-in abstract interpretation steps:
+  virtual ArgToken make_parameter( BasicType type, klassOop tk, int argnum, TRAPS ) = 0;
+  virtual ArgToken make_prim_constant( BasicType type, jvalue* con, TRAPS ) = 0;
+  virtual ArgToken make_oop_constant( oop con, TRAPS ) = 0;
+  virtual ArgToken make_conversion( BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& src, TRAPS ) = 0;
+  virtual ArgToken make_fetch( BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& base, const ArgToken& offset, TRAPS ) = 0;
+  virtual ArgToken make_invoke( methodOop m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, ArgToken* argv, TRAPS ) = 0;
+
+  // For make_invoke, the methodOop can be NULL if the intrinsic ID
+  // is something other than vmIntrinsics::_none.
+
+  // and in case anyone cares to relate the previous actions to the chain:
+  virtual void set_method_handle(oop mh) { }
+
+  void lose(const char* msg, TRAPS) { chain().lose(msg, THREAD); }
+  const char* lose_message()        { return chain().lose_message(); }
+
+  ArgToken walk(TRAPS);
+};
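
// The class above follows the template-method pattern: walk() owns the
// chain traversal while the pure-virtual make_* hooks supply the
// semantics, so one traversal can feed several back ends. A standalone
// sketch with hypothetical names:
#include <cstdio>
struct Walker {
  virtual ~Walker() {}
  virtual int make_parameter(int argnum) = 0;   // plug-in steps
  virtual int make_invoke(int arg) = 0;
  int walk() {                                  // fixed driver
    int tok = make_parameter(0);
    return make_invoke(tok);
  }
};
struct TracingWalker : Walker {
  int make_parameter(int argnum) { std::printf("param %d\n", argnum); return argnum; }
  int make_invoke(int arg)       { std::printf("invoke(%d)\n", arg);  return arg; }
};
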
+
+
+// An abstract interpreter for method handle chains.
+// Produces an account of the semantics of a chain, in terms of a static IR.
+// The IR happens to be JVM bytecodes.
+class MethodHandleCompiler : public MethodHandleWalker {
+private:
+  methodHandle _callee;
+  KlassHandle  _rklass;        // Return type for casting.
+  BasicType    _rtype;
+  KlassHandle  _target_klass;
+  Thread*      _thread;
+
+  // Fake constant pool entry.
+  class ConstantValue {
+  private:
+    int       _tag;   // Constant pool tag type.
+    JavaValue _value;
+    Handle    _handle;
+
+  public:
+    // Constructor for oop types.
+    ConstantValue(int tag, Handle con) : _tag(tag), _handle(con) {
+      assert(tag == JVM_CONSTANT_Utf8   ||
+             tag == JVM_CONSTANT_Class  ||
+             tag == JVM_CONSTANT_String ||
+             tag == JVM_CONSTANT_Object, "must be oop type");
+    }
+
+    // Constructors for constant pool reference entries (these hold indexes, not oops).
+    ConstantValue(int tag, int index) : _tag(tag) {
+      assert(JVM_CONSTANT_Fieldref <= tag && tag <= JVM_CONSTANT_NameAndType, "must be ref type");
+      _value.set_jint(index);
+    }
+    ConstantValue(int tag, int first_index, int second_index) : _tag(tag) {
+      assert(JVM_CONSTANT_Fieldref <= tag && tag <= JVM_CONSTANT_NameAndType, "must be ref type");
+      _value.set_jint(first_index << 16 | second_index);
+    }
+
+    // Constructor for primitive types.
+    ConstantValue(BasicType bt, jvalue con) {
+      _value.set_type(bt);
+      switch (bt) {
+      case T_INT:    _tag = JVM_CONSTANT_Integer; _value.set_jint(   con.i); break;
+      case T_LONG:   _tag = JVM_CONSTANT_Long;    _value.set_jlong(  con.j); break;
+      case T_FLOAT:  _tag = JVM_CONSTANT_Float;   _value.set_jfloat( con.f); break;
+      case T_DOUBLE: _tag = JVM_CONSTANT_Double;  _value.set_jdouble(con.d); break;
+      default: ShouldNotReachHere();
+      }
+    }
+
+    int       tag()          const { return _tag; }
+    symbolOop symbol_oop()   const { return (symbolOop) _handle(); }
+    klassOop  klass_oop()    const { return (klassOop)  _handle(); }
+    oop       object_oop()   const { return _handle(); }
+    int       index()        const { return _value.get_jint(); }
+    int       first_index()  const { return _value.get_jint() >> 16; }
+    int       second_index() const { return _value.get_jint() & 0x0000FFFF; }
+
+    bool      is_primitive() const { return is_java_primitive(_value.get_type()); }
+    jint      get_jint()     const { return _value.get_jint();    }
+    jlong     get_jlong()    const { return _value.get_jlong();   }
+    jfloat    get_jfloat()   const { return _value.get_jfloat();  }
+    jdouble   get_jdouble()  const { return _value.get_jdouble(); }
+  };
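
// The two-index constructor above packs a pair of 16-bit constant pool
// indexes into a single jint, mirroring how a real Methodref or
// NameAndType entry stores two u2 fields. A standalone check of the
// packing arithmetic, using small indexes:
#include <cassert>
#include <cstdint>
int main() {
  uint16_t first = 7, second = 42;
  int32_t packed = (int32_t)first << 16 | second;
  assert((packed >> 16)    == first);   // first_index()
  assert((packed & 0xFFFF) == second);  // second_index()
  return 0;
}
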
+
+  // Fake constant pool.
+  GrowableArray<ConstantValue*> _constants;
+
+  // Accumulated compiler state:
+  GrowableArray<unsigned char> _bytecode;
+
+  int _cur_stack;
+  int _max_stack;
+  int _num_params;
+  int _name_index;
+  int _signature_index;
+
+  void stack_push(BasicType bt) {
+    _cur_stack += type2size[bt];
+    if (_cur_stack > _max_stack) _max_stack = _cur_stack;
+  }
+  void stack_pop(BasicType bt) {
+    _cur_stack -= type2size[bt];
+    assert(_cur_stack >= 0, "sanity");
+  }
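
// stack_push/stack_pop above compute max_stack the way a classfile
// assembler does: track the operand stack depth in words (longs and
// doubles count as two) and keep the high-water mark. A standalone sketch:
#include <algorithm>
struct StackTracker {
  int cur = 0, max = 0;
  void push(int words) { cur += words; max = std::max(max, cur); }
  void pop(int words)  { cur -= words; }   // a debug build would assert cur >= 0
};
// e.g. two lload pushes (2 words each) reach depth 4; ladd pops 4 words
// and pushes 2, so cur drops to 2 while max stays 4.
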
+
+  unsigned char* bytecode()        const { return _bytecode.adr_at(0); }
+  int            bytecode_length() const { return _bytecode.length(); }
+
+  // Fake constant pool.
+  int cpool_oop_put(int tag, Handle con) {
+    if (con.is_null())  return 0;
+    ConstantValue* cv = new ConstantValue(tag, con);
+    return _constants.append(cv);
+  }
+
+  int cpool_oop_reference_put(int tag, int first_index, int second_index) {
+    if (first_index == 0 && second_index == 0)  return 0;
+    assert(first_index != 0 && second_index != 0, "no zero indexes");
+    ConstantValue* cv = new ConstantValue(tag, first_index, second_index);
+    return _constants.append(cv);
+  }
+
+  int cpool_primitive_put(BasicType type, jvalue* con);
+
+  int cpool_int_put(jint value) {
+    jvalue con; con.i = value;
+    return cpool_primitive_put(T_INT, &con);
+  }
+  int cpool_long_put(jlong value) {
+    jvalue con; con.j = value;
+    return cpool_primitive_put(T_LONG, &con);
+  }
+  int cpool_float_put(jfloat value) {
+    jvalue con; con.f = value;
+    return cpool_primitive_put(T_FLOAT, &con);
+  }
+  int cpool_double_put(jdouble value) {
+    jvalue con; con.d = value;
+    return cpool_primitive_put(T_DOUBLE, &con);
+  }
+
+  int cpool_object_put(Handle obj) {
+    return cpool_oop_put(JVM_CONSTANT_Object, obj);
+  }
+  int cpool_symbol_put(symbolOop sym) {
+    return cpool_oop_put(JVM_CONSTANT_Utf8, sym);
+  }
+  int cpool_klass_put(klassOop klass) {
+    return cpool_oop_put(JVM_CONSTANT_Class, klass);
+  }
+  int cpool_methodref_put(int class_index, int name_and_type_index) {
+    return cpool_oop_reference_put(JVM_CONSTANT_Methodref, class_index, name_and_type_index);
+  }
+  int cpool_name_and_type_put(int name_index, int signature_index) {
+    return cpool_oop_reference_put(JVM_CONSTANT_NameAndType, name_index, signature_index);
+  }
+
+  void emit_bc(Bytecodes::Code op, int index = 0);
+  void emit_load(BasicType bt, int index);
+  void emit_store(BasicType bt, int index);
+  void emit_load_constant(ArgToken arg);
+
+  virtual ArgToken make_parameter(BasicType type, klassOop tk, int argnum, TRAPS) {
+    return ArgToken(tt_parameter, type, argnum);
+  }
+  virtual ArgToken make_oop_constant(oop con, TRAPS) {
+    Handle h(THREAD, con);
+    return ArgToken(tt_constant, T_OBJECT, h);
+  }
+  virtual ArgToken make_prim_constant(BasicType type, jvalue* con, TRAPS) {
+    return ArgToken(tt_constant, type, *con);
+  }
+
+  virtual ArgToken make_conversion(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& src, TRAPS);
+  virtual ArgToken make_fetch(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& base, const ArgToken& offset, TRAPS);
+  virtual ArgToken make_invoke(methodOop m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, ArgToken* argv, TRAPS);
+
+  // Get a real constant pool.
+  constantPoolHandle get_constant_pool(TRAPS) const;
+
+  // Get a real methodOop.
+  methodHandle get_method_oop(TRAPS) const;
+
+public:
+  MethodHandleCompiler(Handle root, methodHandle call_method, bool for_invokedynamic, TRAPS);
+
+  // Compile the given MH chain into bytecode.
+  methodHandle compile(TRAPS);
+
+  // Tests if the given class is a MH adapter holder.
+  static bool klass_is_method_handle_adapter_holder(klassOop klass) {
+    return (klass == SystemDictionary::MethodHandle_klass() ||
+            klass == SystemDictionary::InvokeDynamic_klass());
+  }
+};
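
// The compiler above accumulates bytecodes in a growable byte buffer and
// fake constant pool entries side by side, materializing a real constant
// pool and methodOop only at the end. A standalone sketch of the
// append-only emitter idea (opcodes and operand widths per the JVM spec):
#include <cstdint>
#include <vector>
struct Emitter {
  std::vector<uint8_t> code;
  void emit(uint8_t op) { code.push_back(op); }
  void emit_u2(uint8_t op, uint16_t cp_index) {   // opcode with a u2 operand
    code.push_back(op);
    code.push_back((uint8_t)(cp_index >> 8));
    code.push_back((uint8_t)(cp_index & 0xFF));
  }
};
// e.g. e.emit_u2(0xb8 /* invokestatic */, methodref_index);
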
--- a/src/share/vm/prims/methodHandles.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/prims/methodHandles.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -132,8 +132,9 @@
     }
     return m;
   } else {
+    assert(vmtarget->is_klass(), "must be class or interface");
     decode_flags_result |= MethodHandles::_dmf_does_dispatch;
-    assert(vmtarget->is_klass(), "must be class or interface");
+    decode_flags_result |= MethodHandles::_dmf_has_receiver;
     receiver_limit_result = (klassOop)vmtarget;
     Klass* tk = Klass::cast((klassOop)vmtarget);
     if (tk->is_interface()) {
@@ -142,7 +143,7 @@
       return klassItable::method_for_itable_index((klassOop)vmtarget, vmindex);
     } else {
       if (!tk->oop_is_instance())
-        tk = instanceKlass::cast(SystemDictionary::object_klass());
+        tk = instanceKlass::cast(SystemDictionary::Object_klass());
       return ((instanceKlass*)tk)->method_at_vtable(vmindex);
     }
   }
@@ -179,8 +180,10 @@
         // short-circuits directly to the methodOop.
         // (It might be another argument besides a receiver also.)
         assert(target->is_method(), "must be a simple method");
+        decode_flags_result |= MethodHandles::_dmf_binds_method;
         methodOop m = (methodOop) target;
-        decode_flags_result |= MethodHandles::_dmf_binds_method;
+        if (!m->is_static())
+          decode_flags_result |= MethodHandles::_dmf_has_receiver;
         return m;
       }
     }
@@ -233,8 +236,8 @@
     BasicType recv_bt = char2type(sig->byte_at(1));
     // Note: recv_bt might be T_ILLEGAL if byte_at(2) is ')'
     assert(sig->byte_at(0) == '(', "must be method sig");
-    if (recv_bt == T_OBJECT || recv_bt == T_ARRAY)
-      decode_flags_result |= _dmf_has_receiver;
+//     if (recv_bt == T_OBJECT || recv_bt == T_ARRAY)
+//       decode_flags_result |= _dmf_has_receiver;
   } else {
     // non-static method
     decode_flags_result |= _dmf_has_receiver;
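
// decode_flags_result is an OR-accumulated bitmask; the hunks above set
// _dmf_has_receiver for any non-static bound method rather than inferring
// it from the signature's first parameter. A sketch of the accumulation
// with made-up bit values (the real constants live in MethodHandles):
enum { dmf_binds_method = 1 << 0, dmf_has_receiver = 1 << 1, dmf_does_dispatch = 1 << 2 };
static int decode_flags(bool is_static, bool dispatches) {
  int flags = dmf_binds_method;
  if (!is_static) flags |= dmf_has_receiver;   // receiver is an implicit first arg
  if (dispatches) flags |= dmf_does_dispatch;  // vtable/itable call, not direct
  return flags;
}
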
@@ -261,14 +264,14 @@
     return decode_MemberName(x, receiver_limit_result, decode_flags_result);
   } else if (java_dyn_MethodHandle::is_subclass(xk)) {
     return decode_MethodHandle(x, receiver_limit_result, decode_flags_result);
-  } else if (xk == SystemDictionary::reflect_method_klass()) {
+  } else if (xk == SystemDictionary::reflect_Method_klass()) {
     oop clazz  = java_lang_reflect_Method::clazz(x);
     int slot   = java_lang_reflect_Method::slot(x);
     klassOop k = java_lang_Class::as_klassOop(clazz);
     if (k != NULL && Klass::cast(k)->oop_is_instance())
       return decode_methodOop(instanceKlass::cast(k)->method_with_idnum(slot),
                               decode_flags_result);
-  } else if (xk == SystemDictionary::reflect_constructor_klass()) {
+  } else if (xk == SystemDictionary::reflect_Constructor_klass()) {
     oop clazz  = java_lang_reflect_Constructor::clazz(x);
     int slot   = java_lang_reflect_Constructor::slot(x);
     klassOop k = java_lang_Class::as_klassOop(clazz);
@@ -325,7 +328,7 @@
 };
 
 void MethodHandles::init_MemberName(oop mname_oop, oop target_oop) {
-  if (target_oop->klass() == SystemDictionary::reflect_field_klass()) {
+  if (target_oop->klass() == SystemDictionary::reflect_Field_klass()) {
     oop clazz = java_lang_reflect_Field::clazz(target_oop); // fd.field_holder()
     int slot  = java_lang_reflect_Field::slot(target_oop);  // fd.index()
     int mods  = java_lang_reflect_Field::modifiers(target_oop);
@@ -410,7 +413,7 @@
   if (defc_klassOop == NULL)  return;  // a primitive; no resolution possible
   if (!Klass::cast(defc_klassOop)->oop_is_instance()) {
     if (!Klass::cast(defc_klassOop)->oop_is_array())  return;
-    defc_klassOop = SystemDictionary::object_klass();
+    defc_klassOop = SystemDictionary::Object_klass();
   }
   instanceKlassHandle defc(THREAD, defc_klassOop);
   defc_klassOop = NULL;  // safety
@@ -746,7 +749,7 @@
       return NULL;                // unformed MH
     }
     klassOop tklass = target->klass();
-    if (Klass::cast(tklass)->is_subclass_of(SystemDictionary::object_klass())) {
+    if (Klass::cast(tklass)->is_subclass_of(SystemDictionary::Object_klass())) {
       return target;              // target is another MH (or something else?)
     }
   }
@@ -818,26 +821,26 @@
   for (int i = 0; ; i++) {
     const char* test_name = always_null_names[i];
     if (test_name == NULL)  break;
-    if (name->equals(test_name, (int) strlen(test_name)))
+    if (name->equals(test_name))
       return true;
   }
   return false;
 }
 
 bool MethodHandles::class_cast_needed(klassOop src, klassOop dst) {
-  if (src == dst || dst == SystemDictionary::object_klass())
+  if (src == dst || dst == SystemDictionary::Object_klass())
     return false;                               // quickest checks
   Klass* srck = Klass::cast(src);
   Klass* dstk = Klass::cast(dst);
   if (dstk->is_interface()) {
     // interface receivers can safely be viewed as untyped,
     // because interface calls always include a dynamic check
-    //dstk = Klass::cast(SystemDictionary::object_klass());
+    //dstk = Klass::cast(SystemDictionary::Object_klass());
     return false;
   }
   if (srck->is_interface()) {
     // interface arguments must be viewed as untyped
-    //srck = Klass::cast(SystemDictionary::object_klass());
+    //srck = Klass::cast(SystemDictionary::Object_klass());
     return true;
   }
   if (is_always_null_type(src)) {
@@ -850,7 +853,7 @@
 }
 
 static oop object_java_mirror() {
-  return Klass::cast(SystemDictionary::object_klass())->java_mirror();
+  return Klass::cast(SystemDictionary::Object_klass())->java_mirror();
 }
 
 bool MethodHandles::same_basic_type_for_arguments(BasicType src,
@@ -1446,7 +1449,7 @@
       break;
     }
     // check subrange of Integer.value, if necessary
-    if (argument == NULL || argument->klass() != SystemDictionary::int_klass()) {
+    if (argument == NULL || argument->klass() != SystemDictionary::Integer_klass()) {
       err = "bound integer argument must be of type java.lang.Integer";
       break;
     }
@@ -1469,7 +1472,7 @@
       BasicType argbox = java_lang_boxing_object::basic_type(argument);
       if (argbox != ptype) {
         err = check_argument_type_change(T_OBJECT, (argument == NULL
-                                                    ? SystemDictionary::object_klass()
+                                                    ? SystemDictionary::Object_klass()
                                                     : argument->klass()),
                                          ptype, ptype_klass(), argnum);
         assert(err != NULL, "this must be an error");
@@ -1487,8 +1490,9 @@
       int target_pushes = decode_MethodHandle_stack_pushes(target());
       assert(this_pushes == slots_pushed + target_pushes, "BMH stack motion must be correct");
       // do not blow the stack; use a Java-based adapter if this limit is exceeded
-      if (slots_pushed + target_pushes > MethodHandlePushLimit)
-        err = "too many bound parameters";
+      // FIXME
+      // if (slots_pushed + target_pushes > MethodHandlePushLimit)
+      //   err = "too many bound parameters";
     }
   }
 
@@ -1518,6 +1522,11 @@
     verify_vmslots(mh, CHECK);
   }
 
+  // Get bound type and required slots.
+  oop ptype_oop = java_dyn_MethodType::ptype(java_dyn_MethodHandle::type(target()), argnum);
+  BasicType ptype = java_lang_Class::as_BasicType(ptype_oop);
+  int slots_pushed = type2size[ptype];
+
   // If (a) the target is a direct non-dispatched method handle,
   // or (b) the target is a dispatched direct method handle and we
   // are binding the receiver, cut out the middle-man.
@@ -1529,7 +1538,7 @@
     int decode_flags = 0; klassOop receiver_limit_oop = NULL;
     methodHandle m(THREAD, decode_method(target(), receiver_limit_oop, decode_flags));
     if (m.is_null()) { THROW_MSG(vmSymbols::java_lang_InternalError(), "DMH failed to decode"); }
-    DEBUG_ONLY(int m_vmslots = m->size_of_parameters() - 1); // pos. of 1st arg.
+    DEBUG_ONLY(int m_vmslots = m->size_of_parameters() - slots_pushed); // pos. of 1st arg.
     assert(sun_dyn_BoundMethodHandle::vmslots(mh()) == m_vmslots, "type w/ m sig");
     if (argnum == 0 && (decode_flags & _dmf_has_receiver) != 0) {
       KlassHandle receiver_limit(THREAD, receiver_limit_oop);
@@ -1554,10 +1563,6 @@
   }
 
   // Next question:  Is this a ref, int, or long bound value?
-  oop ptype_oop = java_dyn_MethodType::ptype(java_dyn_MethodHandle::type(target()), argnum);
-  BasicType ptype = java_lang_Class::as_BasicType(ptype_oop);
-  int slots_pushed = type2size[ptype];
-
   MethodHandleEntry* me = NULL;
   if (ptype == T_OBJECT) {
     if (direct_to_method)  me = MethodHandles::entry(_bound_ref_direct_mh);
@@ -2170,7 +2175,7 @@
       symbolOop name = vmSymbols::toString_name(), sig = vmSymbols::void_string_signature();
       JavaCallArguments args(Handle(THREAD, JNIHandles::resolve_non_null(erased_jh)));
       JavaValue result(T_OBJECT);
-      JavaCalls::call_virtual(&result, SystemDictionary::object_klass(), name, sig,
+      JavaCalls::call_virtual(&result, SystemDictionary::Object_klass(), name, sig,
                               &args, CHECK);
       Handle str(THREAD, (oop)result.get_jobject());
       java_lang_String::print(str, tty);
@@ -2347,9 +2352,9 @@
 JVM_ENTRY(void, MH_linkCallSite(JNIEnv *env, jobject igcls, jobject site_jh, jobject target_jh)) {
   // No special action required, yet.
   oop site_oop = JNIHandles::resolve(site_jh);
-  if (site_oop == NULL || site_oop->klass() != SystemDictionary::CallSiteImpl_klass())
+  if (site_oop == NULL || site_oop->klass() != SystemDictionary::CallSite_klass())
     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "call site");
-  sun_dyn_CallSiteImpl::set_target(site_oop, JNIHandles::resolve(target_jh));
+  java_dyn_CallSite::set_target(site_oop, JNIHandles::resolve(target_jh));
 }
 JVM_END
 
@@ -2365,6 +2370,7 @@
 #define OBJ   LANG"Object;"
 #define CLS   LANG"Class;"
 #define STRG  LANG"String;"
+#define CST   JDYN"CallSite;"
 #define MT    JDYN"MethodType;"
 #define MH    JDYN"MethodHandle;"
 #define MHI   IDYN"MethodHandleImpl;"
@@ -2372,7 +2378,6 @@
 #define AMH   IDYN"AdapterMethodHandle;"
 #define BMH   IDYN"BoundMethodHandle;"
 #define DMH   IDYN"DirectMethodHandle;"
-#define CSTI  IDYN"CallSiteImpl;"
 
 #define CC (char*)  /*cast a literal from (const char*)*/
 #define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f)
@@ -2398,7 +2403,7 @@
 
 // More entry points specifically for EnableInvokeDynamic.
 static JNINativeMethod methods2[] = {
-  {CC"linkCallSite",            CC"("CSTI MH")V",               FN_PTR(MH_linkCallSite)}
+  {CC"linkCallSite",            CC"("CST MH")V",                FN_PTR(MH_linkCallSite)}
 };
 
 
--- a/src/share/vm/prims/nativeLookup.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/prims/nativeLookup.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -137,7 +137,7 @@
   }
 
   // Otherwise call static method findNative in ClassLoader
-  KlassHandle   klass (THREAD, SystemDictionary::classloader_klass());
+  KlassHandle   klass (THREAD, SystemDictionary::ClassLoader_klass());
   Handle name_arg = java_lang_String::create_from_str(jni_name, CHECK_NULL);
 
   JavaValue result(T_LONG);
--- a/src/share/vm/runtime/arguments.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/arguments.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,7 +37,6 @@
 const char*  Arguments::_gc_log_filename        = NULL;
 bool   Arguments::_has_profile                  = false;
 bool   Arguments::_has_alloc_profile            = false;
-uintx  Arguments::_initial_heap_size            = 0;
 uintx  Arguments::_min_heap_size                = 0;
 Arguments::Mode Arguments::_mode                = _mixed;
 bool   Arguments::_java_compiler                = false;
@@ -182,6 +181,9 @@
   { "ProcessingToTenuringRatio",     JDK_Version::jdk(5), JDK_Version::jdk(7) },
   { "MinTrainLength",                JDK_Version::jdk(5), JDK_Version::jdk(7) },
   { "AppendRatio",         JDK_Version::jdk_update(6,10), JDK_Version::jdk(7) },
+  { "DefaultMaxRAM",       JDK_Version::jdk_update(6,18), JDK_Version::jdk(7) },
+  { "DefaultInitialRAMFraction",
+                           JDK_Version::jdk_update(6,18), JDK_Version::jdk(7) },
   { NULL, JDK_Version(0), JDK_Version(0) }
 };
 
@@ -555,6 +557,10 @@
   if (!is_neg && CommandLineFlags::uintxAtPut(name, &uintx_v, origin)) {
     return true;
   }
+  uint64_t uint64_t_v = (uint64_t) v;
+  if (!is_neg && CommandLineFlags::uint64_tAtPut(name, &uint64_t_v, origin)) {
+    return true;
+  }
   return false;
 }
 
@@ -942,12 +948,13 @@
   }
 }
 
+#ifndef KERNEL
 // If the user has chosen ParallelGCThreads > 0, we set UseParNewGC
 // if it has not been explicitly set or unset. If the user has chosen
 // UseParNewGC and has not explicitly set ParallelGCThreads, we
 // set it, unless this is a single-CPU machine.
 void Arguments::set_parnew_gc_flags() {
-  assert(!UseSerialGC && !UseParallelGC && !UseG1GC,
+  assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC && !UseG1GC,
          "control point invariant");
   assert(UseParNewGC, "Error");
 
@@ -960,13 +967,13 @@
   if (ParallelGCThreads == 0) {
     FLAG_SET_DEFAULT(ParallelGCThreads,
                      Abstract_VM_Version::parallel_worker_threads());
-    if (FLAG_IS_DEFAULT(ParallelGCThreads) && ParallelGCThreads == 1) {
+    if (ParallelGCThreads == 1) {
       FLAG_SET_DEFAULT(UseParNewGC, false);
+      FLAG_SET_DEFAULT(ParallelGCThreads, 0);
     }
   }
-  if (!UseParNewGC) {
-    FLAG_SET_DEFAULT(ParallelGCThreads, 0);
-  } else {
+  if (UseParNewGC) {
+    // CDS doesn't work with ParNew yet
     no_shared_spaces();
 
     // By default YoungPLABSize and OldPLABSize are set to 4096 and 1024 respectively,
@@ -980,7 +987,7 @@
       FLAG_SET_DEFAULT(OldPLABSize, (intx)1024);
     }
 
-    // AlwaysTenure flag should make ParNew to promote all at first collection.
+    // AlwaysTenure flag should make ParNew promote all at first collection.
     // See CR 6362902.
     if (AlwaysTenure) {
       FLAG_SET_CMDLINE(intx, MaxTenuringThreshold, 0);
@@ -1003,7 +1010,7 @@
 // further optimization and tuning efforts, and would almost
 // certainly gain from analysis of platform and environment.
 void Arguments::set_cms_and_parnew_gc_flags() {
-  assert(!UseSerialGC && !UseParallelGC, "Error");
+  assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC, "Error");
   assert(UseConcMarkSweepGC, "CMS is expected to be on here");
 
   // If we are using CMS, we prefer to UseParNewGC,
@@ -1068,7 +1075,7 @@
     } else {
       FLAG_SET_ERGO(uintx, MaxNewSize, preferred_max_new_size);
     }
-    if(PrintGCDetails && Verbose) {
+    if (PrintGCDetails && Verbose) {
       // Too early to use gclog_or_tty
       tty->print_cr("Ergo set MaxNewSize: " SIZE_FORMAT, MaxNewSize);
     }
@@ -1097,15 +1104,15 @@
     } else {
       min_new = NewSize;
     }
-    size_t prev_initial_size = initial_heap_size();
-    if (prev_initial_size != 0 && prev_initial_size < min_new+OldSize) {
-      set_initial_heap_size(min_new+OldSize);
+    size_t prev_initial_size = InitialHeapSize;
+    if (prev_initial_size != 0 && prev_initial_size < min_new + OldSize) {
+      FLAG_SET_ERGO(uintx, InitialHeapSize, min_new + OldSize);
       // Currently minimum size and the initial heap sizes are the same.
-      set_min_heap_size(initial_heap_size());
+      set_min_heap_size(InitialHeapSize);
       if (PrintGCDetails && Verbose) {
         warning("Initial heap size increased to " SIZE_FORMAT " M from "
                 SIZE_FORMAT " M; use -XX:NewSize=... for finer control.",
-                initial_heap_size()/M, prev_initial_size/M);
+                InitialHeapSize/M, prev_initial_size/M);
       }
     }
 
@@ -1114,12 +1121,12 @@
       align_size_down(MaxHeapSize,
                       CardTableRS::ct_max_alignment_constraint());
 
-    if(PrintGCDetails && Verbose) {
+    if (PrintGCDetails && Verbose) {
       // Too early to use gclog_or_tty
       tty->print_cr("CMS set min_heap_size: " SIZE_FORMAT
            " initial_heap_size:  " SIZE_FORMAT
            " max_heap: " SIZE_FORMAT,
-           min_heap_size(), initial_heap_size(), max_heap);
+           min_heap_size(), InitialHeapSize, max_heap);
     }
     if (max_heap > min_new) {
       // Unless explicitly requested otherwise, make young gen
@@ -1127,7 +1134,7 @@
       if (FLAG_IS_DEFAULT(NewSize)) {
         FLAG_SET_ERGO(uintx, NewSize, MAX2(NewSize, min_new));
         FLAG_SET_ERGO(uintx, NewSize, MIN2(preferred_max_new_size, NewSize));
-        if(PrintGCDetails && Verbose) {
+        if (PrintGCDetails && Verbose) {
           // Too early to use gclog_or_tty
           tty->print_cr("Ergo set NewSize: " SIZE_FORMAT, NewSize);
         }
@@ -1137,8 +1144,8 @@
       // later NewRatio will decide how it grows; see above.
       if (FLAG_IS_DEFAULT(OldSize)) {
         if (max_heap > NewSize) {
-          FLAG_SET_ERGO(uintx, OldSize, MIN2(3*NewSize,  max_heap - NewSize));
-          if(PrintGCDetails && Verbose) {
+          FLAG_SET_ERGO(uintx, OldSize, MIN2(3*NewSize, max_heap - NewSize));
+          if (PrintGCDetails && Verbose) {
             // Too early to use gclog_or_tty
             tty->print_cr("Ergo set OldSize: " SIZE_FORMAT, OldSize);
           }
@@ -1171,8 +1178,7 @@
       // the value (either from the command line or ergonomics) of
       // OldPLABSize.  Following OldPLABSize is an ergonomics decision.
       FLAG_SET_ERGO(uintx, CMSParPromoteBlocksToClaim, OldPLABSize);
-    }
-    else {
+    } else {
       // OldPLABSize and CMSParPromoteBlocksToClaim are both set.
       // CMSParPromoteBlocksToClaim is a collector-specific flag, so
       // we'll let it take precedence.
@@ -1182,11 +1188,27 @@
                   " CMSParPromoteBlocksToClaim will take precedence.\n");
     }
   }
+  if (!FLAG_IS_DEFAULT(ResizeOldPLAB) && !ResizeOldPLAB) {
+    // OldPLAB sizing manually turned off: Use a larger default setting,
+    // unless it was manually specified. This is because a too-low value
+    // will slow down scavenges.
+    if (FLAG_IS_DEFAULT(CMSParPromoteBlocksToClaim)) {
+      FLAG_SET_ERGO(uintx, CMSParPromoteBlocksToClaim, 50); // default value before 6631166
+    }
+  }
+  // Overwrite OldPLABSize, which is the variable we use internally everywhere.
+  FLAG_SET_ERGO(uintx, OldPLABSize, CMSParPromoteBlocksToClaim);
+  // If either of the static initialization defaults has changed, note this
+  // modification.
+  if (!FLAG_IS_DEFAULT(CMSParPromoteBlocksToClaim) || !FLAG_IS_DEFAULT(OldPLABWeight)) {
+    CFLS_LAB::modify_initialization(OldPLABSize, OldPLABWeight);
+  }
 }
+#endif // KERNEL
 
 inline uintx max_heap_for_compressed_oops() {
   LP64_ONLY(return oopDesc::OopEncodingHeapMax - MaxPermSize - os::vm_page_size());
-  NOT_LP64(return DefaultMaxRAM);
+  NOT_LP64(ShouldNotReachHere(); return 0);
 }
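
// On LP64 the largest heap that still permits compressed oops is the
// 32-bit oop range scaled by the alignment shift, minus the permanent
// generation and one guard page: with 8-byte alignment, 4 GB << 3 = 32 GB.
// A standalone version of the arithmetic (the perm and page sizes here
// are assumed values, not the real flags):
#include <cstdint>
#include <cstdio>
int main() {
  const uint64_t oop_encoding_heap_max = (4ull << 30) << 3;  // 32 GB
  const uint64_t max_perm_size = 64ull << 20;                // assumed MaxPermSize
  const uint64_t page_size = 4096;                           // assumed os::vm_page_size()
  std::printf("%llu\n",
      (unsigned long long)(oop_encoding_heap_max - max_perm_size - page_size));
  return 0;
}
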
 
 bool Arguments::should_auto_select_low_pause_collector() {
@@ -1205,7 +1227,7 @@
 
 void Arguments::set_ergonomics_flags() {
   // Parallel GC is not compatible with sharing. If one specifies
-  // that they want sharing explicitly, do not set ergonmics flags.
+  // that they want sharing explicitly, do not set ergonomics flags.
   if (DumpSharedSpaces || ForceSharedSpaces) {
     return;
   }
@@ -1234,9 +1256,11 @@
   // Check that UseCompressedOops can be set with the max heap size allocated
   // by ergonomics.
   if (MaxHeapSize <= max_heap_for_compressed_oops()) {
+#ifndef COMPILER1
     if (FLAG_IS_DEFAULT(UseCompressedOops) && !UseG1GC) {
       FLAG_SET_ERGO(bool, UseCompressedOops, true);
     }
+#endif
 #ifdef _WIN64
     if (UseLargePages && UseCompressedOops) {
       // Cannot allocate guard pages for implicit checks in indexed addressing
@@ -1271,8 +1295,6 @@
     FLAG_SET_ERGO(uintx, ParallelGCThreads,
                   Abstract_VM_Version::parallel_worker_threads());
 
-    // PS is a server collector, setup the heap sizes accordingly.
-    set_server_heap_size();
     // If InitialSurvivorRatio or MinSurvivorRatio were not specified, but the
     // SurvivorRatio has been set, reset their default values to SurvivorRatio +
     // 2.  By doing this we make SurvivorRatio also work for Parallel Scavenger.
@@ -1302,8 +1324,6 @@
 
 void Arguments::set_g1_gc_flags() {
   assert(UseG1GC, "Error");
-  // G1 is a server collector, setup the heap sizes accordingly.
-  set_server_heap_size();
 #ifdef COMPILER1
   FastTLABRefill = false;
 #endif
@@ -1321,50 +1341,79 @@
   }
 }
 
-void Arguments::set_server_heap_size() {
+void Arguments::set_heap_size() {
+  if (!FLAG_IS_DEFAULT(DefaultMaxRAMFraction)) {
+    // Deprecated flag
+    FLAG_SET_CMDLINE(uintx, MaxRAMFraction, DefaultMaxRAMFraction);
+  }
+
+  const julong phys_mem =
+    FLAG_IS_DEFAULT(MaxRAM) ? MIN2(os::physical_memory(), (julong)MaxRAM)
+                            : (julong)MaxRAM;
+
+  // If the maximum heap size has not been set with -Xmx,
+  // then set it as fraction of the size of physical memory,
+  // respecting the maximum and minimum sizes of the heap.
   if (FLAG_IS_DEFAULT(MaxHeapSize)) {
-    const uint64_t reasonable_fraction =
-      os::physical_memory() / DefaultMaxRAMFraction;
-    const uint64_t maximum_size = (uint64_t)
-                 (FLAG_IS_DEFAULT(DefaultMaxRAM) && UseCompressedOops ?
-                     MIN2(max_heap_for_compressed_oops(), DefaultMaxRAM) :
-                     DefaultMaxRAM);
-    size_t reasonable_max =
-      (size_t) os::allocatable_physical_memory(reasonable_fraction);
-    if (reasonable_max > maximum_size) {
-      reasonable_max = maximum_size;
+    julong reasonable_max = phys_mem / MaxRAMFraction;
+
+    if (phys_mem <= MaxHeapSize * MinRAMFraction) {
+      // Small physical memory, so use a minimum fraction of it for the heap
+      reasonable_max = phys_mem / MinRAMFraction;
+    } else {
+      // Not-small physical memory, so require a heap at least
+      // as large as MaxHeapSize
+      reasonable_max = MAX2(reasonable_max, (julong)MaxHeapSize);
+    }
+    if (!FLAG_IS_DEFAULT(ErgoHeapSizeLimit) && ErgoHeapSizeLimit != 0) {
+      // Limit the heap size to ErgoHeapSizeLimit
+      reasonable_max = MIN2(reasonable_max, (julong)ErgoHeapSizeLimit);
     }
+    if (UseCompressedOops) {
+      // Limit the heap size to the maximum possible when using compressed oops
+      reasonable_max = MIN2(reasonable_max, (julong)max_heap_for_compressed_oops());
+    }
+    reasonable_max = os::allocatable_physical_memory(reasonable_max);
+
+    if (!FLAG_IS_DEFAULT(InitialHeapSize)) {
+      // An initial heap size was specified on the command line,
+      // so be sure that the maximum size is consistent.  Done
+      // after call to allocatable_physical_memory because that
+      // method might reduce the allocation size.
+      reasonable_max = MAX2(reasonable_max, (julong)InitialHeapSize);
+    }
+
     if (PrintGCDetails && Verbose) {
       // Cannot use gclog_or_tty yet.
-      tty->print_cr("  Max heap size for server class platform "
-                    SIZE_FORMAT, reasonable_max);
+      tty->print_cr("  Maximum heap size " SIZE_FORMAT, reasonable_max);
     }
-    // If the initial_heap_size has not been set with -Xms,
-    // then set it as fraction of size of physical memory
-    // respecting the maximum and minimum sizes of the heap.
-    if (initial_heap_size() == 0) {
-      const uint64_t reasonable_initial_fraction =
-        os::physical_memory() / DefaultInitialRAMFraction;
-      const size_t reasonable_initial =
-        (size_t) os::allocatable_physical_memory(reasonable_initial_fraction);
-      const size_t minimum_size = NewSize + OldSize;
-      set_initial_heap_size(MAX2(MIN2(reasonable_initial, reasonable_max),
-                                minimum_size));
-      // Currently the minimum size and the initial heap sizes are the same.
-      set_min_heap_size(initial_heap_size());
-      if (PrintGCDetails && Verbose) {
-        // Cannot use gclog_or_tty yet.
-        tty->print_cr("  Initial heap size for server class platform "
-                      SIZE_FORMAT, initial_heap_size());
-      }
-    } else {
-      // A minimum size was specified on the command line.  Be sure
-      // that the maximum size is consistent.
-      if (initial_heap_size() > reasonable_max) {
-        reasonable_max = initial_heap_size();
-      }
+    FLAG_SET_ERGO(uintx, MaxHeapSize, (uintx)reasonable_max);
+  }
+
+  // If the initial_heap_size has not been set with InitialHeapSize
+  // or -Xms, then set it as fraction of the size of physical memory,
+  // respecting the maximum and minimum sizes of the heap.
+  if (FLAG_IS_DEFAULT(InitialHeapSize)) {
+    julong reasonable_minimum = (julong)(OldSize + NewSize);
+
+    reasonable_minimum = MIN2(reasonable_minimum, (julong)MaxHeapSize);
+
+    reasonable_minimum = os::allocatable_physical_memory(reasonable_minimum);
+
+    julong reasonable_initial = phys_mem / InitialRAMFraction;
+
+    reasonable_initial = MAX2(reasonable_initial, reasonable_minimum);
+    reasonable_initial = MIN2(reasonable_initial, (julong)MaxHeapSize);
+
+    reasonable_initial = os::allocatable_physical_memory(reasonable_initial);
+
+    if (PrintGCDetails && Verbose) {
+      // Cannot use gclog_or_tty yet.
+      tty->print_cr("  Initial heap size " SIZE_FORMAT, (uintx)reasonable_initial);
+      tty->print_cr("  Minimum heap size " SIZE_FORMAT, (uintx)reasonable_minimum);
     }
-    FLAG_SET_ERGO(uintx, MaxHeapSize, (uintx) reasonable_max);
+    FLAG_SET_ERGO(uintx, InitialHeapSize, (uintx)reasonable_initial);
+    set_min_heap_size((uintx)reasonable_minimum);
   }
 }
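
// A simplified standalone sketch of the ergonomic sizing above (the flag
// values are made-up defaults, not authoritative): the maximum heap is a
// fraction of physical RAM, bumped up on small machines, and the initial
// heap is a smaller fraction clamped into [NewSize+OldSize, max].
#include <algorithm>
#include <cstdint>
#include <cstdio>
int main() {
  const uint64_t phys_mem = 8ull << 30;   // pretend 8 GB of RAM
  const uint64_t max_ram_fraction = 4, min_ram_fraction = 2, initial_ram_fraction = 64;
  const uint64_t default_max_heap = 64ull << 20, new_plus_old = 6ull << 20;
  uint64_t reasonable_max = phys_mem / max_ram_fraction;          // 2 GB here
  if (phys_mem <= default_max_heap * min_ram_fraction)
    reasonable_max = phys_mem / min_ram_fraction;                 // small-RAM case
  uint64_t reasonable_initial = phys_mem / initial_ram_fraction;  // 128 MB here
  reasonable_initial = std::max(reasonable_initial, new_plus_old);
  reasonable_initial = std::min(reasonable_initial, reasonable_max);
  std::printf("max=%llu initial=%llu\n",
      (unsigned long long)reasonable_max, (unsigned long long)reasonable_initial);
  return 0;
}
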
 
@@ -1438,6 +1487,20 @@
 //===========================================================================================================
 // Parsing of main arguments
 
+bool Arguments::verify_interval(uintx val, uintx min,
+                                uintx max, const char* name) {
+  // Returns true iff val is in the inclusive interval [min..max],
+  // and false otherwise.
+  if (val >= min && val <= max) {
+    return true;
+  }
+  jio_fprintf(defaultStream::error_stream(),
+              "%s of " UINTX_FORMAT " is invalid; must be between " UINTX_FORMAT
+              " and " UINTX_FORMAT "\n",
+              name, val, min, max);
+  return false;
+}
+
 bool Arguments::verify_percentage(uintx value, const char* name) {
   if (value <= 100) {
     return true;
@@ -1448,7 +1511,7 @@
   return false;
 }
 
-static void set_serial_gc_flags() {
+static void force_serial_gc() {
   FLAG_SET_DEFAULT(UseSerialGC, true);
   FLAG_SET_DEFAULT(UseParNewGC, false);
   FLAG_SET_DEFAULT(UseConcMarkSweepGC, false);
@@ -1584,15 +1647,15 @@
     // force sharing off.
     if (DumpSharedSpaces || ForceSharedSpaces) {
       jio_fprintf(defaultStream::error_stream(),
-                  "Reverting to Serial GC because of %s \n",
+                  "Reverting to Serial GC because of %s\n",
                   ForceSharedSpaces ? " -Xshare:on" : "-Xshare:dump");
-      set_serial_gc_flags();
+      force_serial_gc();
       FLAG_SET_DEFAULT(SOLARIS_ONLY(UseISM) NOT_SOLARIS(UseLargePages), false);
     } else {
-      if (UseSharedSpaces) {
+      if (UseSharedSpaces && Verbose) {
         jio_fprintf(defaultStream::error_stream(),
                     "Turning off use of shared archive because of "
-                    "choice of garbage collector or large pages \n");
+                    "choice of garbage collector or large pages\n");
       }
       no_shared_spaces();
     }
@@ -1674,6 +1737,16 @@
     status = false;
   }
 
+  status = status && verify_interval(RefDiscoveryPolicy,
+                                     ReferenceProcessor::DiscoveryPolicyMin,
+                                     ReferenceProcessor::DiscoveryPolicyMax,
+                                     "RefDiscoveryPolicy");
+
+  // Limit the lower bound of this flag to 1 as it is used in a division
+  // expression.
+  status = status && verify_interval(TLABWasteTargetPercent,
+                                     1, 100, "TLABWasteTargetPercent");
+
   return status;
 }
 
@@ -1817,7 +1890,6 @@
         FLAG_SET_CMDLINE(bool, TraceClassUnloading, true);
       } else if (!strcmp(tail, ":gc")) {
         FLAG_SET_CMDLINE(bool, PrintGC, true);
-        FLAG_SET_CMDLINE(bool, TraceClassUnloading, true);
       } else if (!strcmp(tail, ":jni")) {
         FLAG_SET_CMDLINE(bool, PrintJNIResolving, true);
       }
@@ -1925,8 +1997,8 @@
         describe_range_error(errcode);
         return JNI_EINVAL;
       }
-      FLAG_SET_CMDLINE(uintx, MaxNewSize, (size_t) long_initial_eden_size);
-      FLAG_SET_CMDLINE(uintx, NewSize, (size_t) long_initial_eden_size);
+      FLAG_SET_CMDLINE(uintx, MaxNewSize, (uintx)long_initial_eden_size);
+      FLAG_SET_CMDLINE(uintx, NewSize, (uintx)long_initial_eden_size);
     // -Xms
     } else if (match_option(option, "-Xms", &tail)) {
       julong long_initial_heap_size = 0;
@@ -1937,9 +2009,9 @@
         describe_range_error(errcode);
         return JNI_EINVAL;
       }
-      set_initial_heap_size((size_t) long_initial_heap_size);
+      FLAG_SET_CMDLINE(uintx, InitialHeapSize, (uintx)long_initial_heap_size);
       // Currently the minimum size and the initial heap sizes are the same.
-      set_min_heap_size(initial_heap_size());
+      set_min_heap_size(InitialHeapSize);
     // -Xmx
     } else if (match_option(option, "-Xmx", &tail)) {
       julong long_max_heap_size = 0;
@@ -1950,7 +2022,7 @@
         describe_range_error(errcode);
         return JNI_EINVAL;
       }
-      FLAG_SET_CMDLINE(uintx, MaxHeapSize, (size_t) long_max_heap_size);
+      FLAG_SET_CMDLINE(uintx, MaxHeapSize, (uintx)long_max_heap_size);
     // Xmaxf
     } else if (match_option(option, "-Xmaxf", &tail)) {
       int maxf = (int)(atof(tail) * 100);
@@ -2196,9 +2268,9 @@
 
       if (FLAG_IS_DEFAULT(MaxHeapSize)) {
          FLAG_SET_CMDLINE(uintx, MaxHeapSize, initHeapSize);
-         set_initial_heap_size(MaxHeapSize);
+         FLAG_SET_CMDLINE(uintx, InitialHeapSize, initHeapSize);
          // Currently the minimum size and the initial heap sizes are the same.
-         set_min_heap_size(initial_heap_size());
+         set_min_heap_size(initHeapSize);
       }
       if (FLAG_IS_DEFAULT(NewSize)) {
          // Make the young generation 3/8ths of the total heap.
@@ -2337,22 +2409,25 @@
                   "ExtendedDTraceProbes flag is only applicable on Solaris\n");
       return JNI_EINVAL;
 #endif // ndef SOLARIS
-    } else
 #ifdef ASSERT
-    if (match_option(option, "-XX:+FullGCALot", &tail)) {
+    } else if (match_option(option, "-XX:+FullGCALot", &tail)) {
       FLAG_SET_CMDLINE(bool, FullGCALot, true);
       // disable scavenge before parallel mark-compact
       FLAG_SET_CMDLINE(bool, ScavengeBeforeFullGC, false);
-    } else
 #endif
-    if (match_option(option, "-XX:ParCMSPromoteBlocksToClaim=", &tail)) {
+    } else if (match_option(option, "-XX:CMSParPromoteBlocksToClaim=", &tail)) {
       julong cms_blocks_to_claim = (julong)atol(tail);
       FLAG_SET_CMDLINE(uintx, CMSParPromoteBlocksToClaim, cms_blocks_to_claim);
       jio_fprintf(defaultStream::error_stream(),
-        "Please use -XX:CMSParPromoteBlocksToClaim in place of "
+        "Please use -XX:OldPLABSize in place of "
+        "-XX:CMSParPromoteBlocksToClaim in the future\n");
+    } else if (match_option(option, "-XX:ParCMSPromoteBlocksToClaim=", &tail)) {
+      julong cms_blocks_to_claim = (julong)atol(tail);
+      FLAG_SET_CMDLINE(uintx, CMSParPromoteBlocksToClaim, cms_blocks_to_claim);
+      jio_fprintf(defaultStream::error_stream(),
+        "Please use -XX:OldPLABSize in place of "
         "-XX:ParCMSPromoteBlocksToClaim in the future\n");
-    } else
-    if (match_option(option, "-XX:ParallelGCOldGenAllocBufferSize=", &tail)) {
+    } else if (match_option(option, "-XX:ParallelGCOldGenAllocBufferSize=", &tail)) {
       julong old_plab_size = 0;
       ArgsRange errcode = parse_memory_size(tail, &old_plab_size, 1);
       if (errcode != arg_in_range) {
@@ -2365,8 +2440,7 @@
       jio_fprintf(defaultStream::error_stream(),
                   "Please use -XX:OldPLABSize in place of "
                   "-XX:ParallelGCOldGenAllocBufferSize in the future\n");
-    } else
-    if (match_option(option, "-XX:ParallelGCToSpaceAllocBufferSize=", &tail)) {
+    } else if (match_option(option, "-XX:ParallelGCToSpaceAllocBufferSize=", &tail)) {
       julong young_plab_size = 0;
       ArgsRange errcode = parse_memory_size(tail, &young_plab_size, 1);
       if (errcode != arg_in_range) {
@@ -2379,8 +2453,7 @@
       jio_fprintf(defaultStream::error_stream(),
                   "Please use -XX:YoungPLABSize in place of "
                   "-XX:ParallelGCToSpaceAllocBufferSize in the future\n");
-    } else
-    if (match_option(option, "-XX:", &tail)) { // -XX:xxxx
+    } else if (match_option(option, "-XX:", &tail)) { // -XX:xxxx
       // Skip -XX:Flags= since that case has already been handled
       if (strncmp(tail, "Flags=", strlen("Flags=")) != 0) {
         if (!process_argument(tail, args->ignoreUnrecognized, origin)) {
@@ -2451,6 +2524,9 @@
     SOLARIS_ONLY(FLAG_SET_DEFAULT(UseISM, false));
   }
 
+  // Tiered compilation is undefined with C1.
+  TieredCompilation = false;
+
 #else
   if (!FLAG_IS_DEFAULT(OptoLoopAlignment) && FLAG_IS_DEFAULT(MaxLoopPad)) {
     FLAG_SET_DEFAULT(MaxLoopPad, OptoLoopAlignment-1);
@@ -2600,6 +2676,10 @@
     if (match_option(option, "-XX:-IgnoreUnrecognizedVMOptions", &tail)) {
       IgnoreUnrecognizedVMOptions = false;
     }
+    if (match_option(option, "-XX:+PrintFlagsInitial", &tail)) {
+      CommandLineFlags::printFlags();
+      vm_exit(0);
+    }
   }
 
   if (IgnoreUnrecognizedVMOptions) {
@@ -2666,17 +2746,27 @@
     }
     ScavengeRootsInCode = 1;
   }
+#ifdef COMPILER2
+  if (EnableInvokeDynamic && DoEscapeAnalysis) {
+    // TODO: We need to find rules for invokedynamic and EA.  For now,
+    // simply disable EA by default.
+    if (FLAG_IS_DEFAULT(DoEscapeAnalysis)) {
+      DoEscapeAnalysis = false;
+    }
+  }
+#endif
 
   if (PrintGCDetails) {
     // Turn on -verbose:gc options as well
     PrintGC = true;
-    if (FLAG_IS_DEFAULT(TraceClassUnloading)) {
-      TraceClassUnloading = true;
-    }
   }
 
+#if defined(_LP64) && defined(COMPILER1)
+  UseCompressedOops = false;
+#endif
+
 #ifdef SERIALGC
-  set_serial_gc_flags();
+  force_serial_gc();
 #endif // SERIALGC
 #ifdef KERNEL
   no_shared_spaces();
@@ -2685,24 +2775,39 @@
   // Set flags based on ergonomics.
   set_ergonomics_flags();
 
+#ifdef _LP64
+  // XXX JSR 292 currently does not support compressed oops.
+  if (EnableMethodHandles && UseCompressedOops) {
+    if (FLAG_IS_DEFAULT(UseCompressedOops) || FLAG_IS_ERGO(UseCompressedOops)) {
+      UseCompressedOops = false;
+    }
+  }
+#endif // _LP64
+
   // Check the GC selections again.
   if (!check_gc_consistency()) {
     return JNI_EINVAL;
   }
 
-  if (UseParallelGC || UseParallelOldGC) {
-    // Set some flags for ParallelGC if needed.
-    set_parallel_gc_flags();
-  } else if (UseConcMarkSweepGC) {
-    // Set some flags for CMS
+#ifndef KERNEL
+  if (UseConcMarkSweepGC) {
+    // Set flags for CMS and ParNew.  Check UseConcMarkSweep first
+    // to ensure that when both UseConcMarkSweepGC and UseParNewGC
+    // are true, we don't call set_parnew_gc_flags() as well.
     set_cms_and_parnew_gc_flags();
-  } else if (UseParNewGC) {
-    // Set some flags for ParNew
-    set_parnew_gc_flags();
-  } else if (UseG1GC) {
-    // Set some flags for garbage-first, if needed.
-    set_g1_gc_flags();
+  } else {
+    // Set heap size based on available physical memory
+    set_heap_size();
+    // Set per-collector flags
+    if (UseParallelGC || UseParallelOldGC) {
+      set_parallel_gc_flags();
+    } else if (UseParNewGC) {
+      set_parnew_gc_flags();
+    } else if (UseG1GC) {
+      set_g1_gc_flags();
+    }
   }
+#endif // KERNEL
 
 #ifdef SERIALGC
   assert(verify_serial_gc_flags(), "SerialGC unset");
@@ -2715,9 +2820,16 @@
   set_aggressive_opts_flags();
 
 #ifdef CC_INTERP
-  // Biased locking is not implemented with c++ interpreter
+  // Clear flags not supported by the C++ interpreter
+  FLAG_SET_DEFAULT(ProfileInterpreter, false);
   FLAG_SET_DEFAULT(UseBiasedLocking, false);
-#endif /* CC_INTERP */
+  LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedOops, false));
+#endif // CC_INTERP
+
+#ifdef ZERO
+  // Clear flags not supported by Zero
+  FLAG_SET_DEFAULT(TaggedStackInterpreter, false);
+#endif // ZERO
 
 #ifdef COMPILER2
   if (!UseBiasedLocking || EmitSync != 0) {
@@ -2725,15 +2837,27 @@
   }
 #endif
 
+  if (PrintAssembly && FLAG_IS_DEFAULT(DebugNonSafepoints)) {
+    warning("PrintAssembly is enabled; turning on DebugNonSafepoints to gain additional output");
+    DebugNonSafepoints = true;
+  }
+
+#ifndef PRODUCT
+  if (CompileTheWorld) {
+    // Force NmethodSweeper to sweep whole CodeCache each time.
+    if (FLAG_IS_DEFAULT(NmethodSweepFraction)) {
+      NmethodSweepFraction = 1;
+    }
+  }
+#endif
+
   if (PrintCommandLineFlags) {
     CommandLineFlags::printSetFlags();
   }
 
-#ifdef ASSERT
   if (PrintFlagsFinal) {
     CommandLineFlags::printFlags();
   }
-#endif
 
   return JNI_OK;
 }
--- a/src/share/vm/runtime/arguments.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/arguments.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -254,7 +254,6 @@
   static bool   _has_profile;
   static bool   _has_alloc_profile;
   static const char*  _gc_log_filename;
-  static uintx  _initial_heap_size;
   static uintx  _min_heap_size;
 
   // -Xrun arguments
@@ -300,8 +299,8 @@
   static void set_g1_gc_flags();
   // GC ergonomics
   static void set_ergonomics_flags();
-  // Setup heap size for a server platform
-  static void set_server_heap_size();
+  // Setup heap size
+  static void set_heap_size();
   // Based on automatic selection criteria, should the
   // low pause collector be used.
   static bool should_auto_select_low_pause_collector();
@@ -337,6 +336,8 @@
   static bool is_bad_option(const JavaVMOption* option, jboolean ignore) {
     return is_bad_option(option, ignore, NULL);
   }
+  static bool verify_interval(uintx val, uintx min,
+                              uintx max, const char* name);
   static bool verify_percentage(uintx value, const char* name);
   static void describe_range_error(ArgsRange errcode);
   static ArgsRange check_memory_size(julong size, julong min_size);
@@ -434,9 +435,7 @@
   static bool has_profile()                 { return _has_profile; }
   static bool has_alloc_profile()           { return _has_alloc_profile; }
 
-  // -Xms , -Xmx
-  static uintx initial_heap_size()          { return _initial_heap_size; }
-  static void  set_initial_heap_size(uintx v) { _initial_heap_size = v;  }
+  // -Xms, -Xmx
   static uintx min_heap_size()              { return _min_heap_size; }
   static void  set_min_heap_size(uintx v)   { _min_heap_size = v;  }
 
--- a/src/share/vm/runtime/compilationPolicy.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/compilationPolicy.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -66,7 +66,7 @@
   if (!canBeCompiled(m))      return false;
 
   return !UseInterpreter ||                                              // must compile all methods
-         (UseCompiler && AlwaysCompileLoopMethods && m->has_loops()); // eagerly compile loop methods
+         (UseCompiler && AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods
 }
 
 // Returns true if m is allowed to be compiled
@@ -74,6 +74,16 @@
   if (m->is_abstract()) return false;
   if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false;
 
+  // Math intrinsics should never be compiled as this can lead to
+  // monotonicity problems because the interpreter will prefer the
+  // compiled code to the intrinsic version.  This can't happen in
+  // production because the invocation counter can't be incremented,
+  // but we shouldn't expose the system to this problem in testing
+  // modes.
+  if (!AbstractInterpreter::can_be_compiled(m)) {
+    return false;
+  }
+
   return !m->is_not_compilable();
 }
 
@@ -127,7 +137,7 @@
   reset_counter_for_invocation_event(m);
   const char* comment = "count";
 
-  if (!delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler) {
+  if (!delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler && CompileBroker::should_compile_new_jobs()) {
     nmethod* nm = m->code();
     if (nm == NULL ) {
       const char* comment = "count";
@@ -152,7 +162,7 @@
   int hot_count = m->backedge_count();
   const char* comment = "backedge_count";
 
-  if (!m->is_not_osr_compilable() && !delayCompilationDuringStartup() && canBeCompiled(m)) {
+  if (!m->is_not_osr_compilable() && !delayCompilationDuringStartup() && canBeCompiled(m) && CompileBroker::should_compile_new_jobs()) {
     CompileBroker::compile_method(m, loop_top_bci, m, hot_count, comment, CHECK);
 
     NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(loop_top_bci));)
@@ -194,7 +204,7 @@
   reset_counter_for_invocation_event(m);
   const char* comment = "count";
 
-  if (m->code() == NULL && !delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler) {
+  if (m->code() == NULL && !delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler && CompileBroker::should_compile_new_jobs()) {
     ResourceMark rm(THREAD);
     JavaThread *thread = (JavaThread*)THREAD;
     frame       fr     = thread->last_frame();
@@ -238,7 +248,7 @@
   int hot_count = m->backedge_count();
   const char* comment = "backedge_count";
 
-  if (!m->is_not_osr_compilable() && !delayCompilationDuringStartup() && canBeCompiled(m)) {
+  if (!m->is_not_osr_compilable() && !delayCompilationDuringStartup() && canBeCompiled(m) && CompileBroker::should_compile_new_jobs()) {
     CompileBroker::compile_method(m, loop_top_bci, m, hot_count, comment, CHECK);
 
     NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(loop_top_bci));)
--- a/src/share/vm/runtime/deoptimization.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/deoptimization.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -145,6 +145,27 @@
     if (EliminateAllocations) {
       assert (chunk->at(0)->scope() != NULL,"expect only compiled java frames");
       GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();
+
+      // The return_oop() flag indicates call sites which return an oop
+      // in compiled code. Such sites include Java method calls, runtime
+      // calls (for example, those used to allocate new objects/arrays on
+      // the slow code path) and any other calls generated in compiled code.
+      // It is not guaranteed that we can recover such information here
+      // just by analyzing the bytecode in deoptimized frames. This is why
+      // this flag is set during method compilation (see Compile::Process_OopMap_Node()).
+      bool save_oop_result = chunk->at(0)->scope()->return_oop();
+      Handle return_value;
+      if (save_oop_result) {
+        // Reallocation may trigger GC. If deoptimization happened on return from
+        // a call which returns an oop, we need to save it since it is not in the oopmap.
+        oop result = deoptee.saved_oop_result(&map);
+        assert(result == NULL || result->is_oop(), "must be oop");
+        return_value = Handle(thread, result);
+        assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
+        if (TraceDeoptimization) {
+          tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, result, thread);
+        }
+      }
       bool reallocated = false;
       if (objects != NULL) {
         JRT_BLOCK
@@ -158,8 +179,12 @@
           ttyLocker ttyl;
           tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, thread);
           print_objects(objects);
+        }
+#endif
       }
-#endif
+      if (save_oop_result) {
+        // Restore result.
+        deoptee.set_saved_oop_result(&map, return_value());
       }
     }
     if (EliminateLocks) {
@@ -235,6 +260,12 @@
   assert(cb->frame_size() >= 0, "Unexpected frame size");
   intptr_t* unpack_sp = stub_frame.sp() + cb->frame_size();
 
+  // If the deopt call site is a MethodHandle invoke call site we have
+  // to adjust the unpack_sp.
+  nmethod* deoptee_nm = deoptee.cb()->as_nmethod_or_null();
+  if (deoptee_nm != NULL && deoptee_nm->is_method_handle_return(deoptee.pc()))
+    unpack_sp = deoptee.unextended_sp();
+
 #ifdef ASSERT
   assert(cb->is_deoptimization_stub() || cb->is_uncommon_trap_stub(), "just checking");
   Events::log("fetch unroll sp " INTPTR_FORMAT, unpack_sp);
@@ -907,21 +938,6 @@
   if (TraceDeoptimization) {
     ttyLocker ttyl;
     tty->print_cr("     Created vframeArray " INTPTR_FORMAT, array);
-    if (Verbose) {
-      int count = 0;
-      // this used to leak deoptimizedVFrame like it was going out of style!!!
-      for (int index = 0; index < array->frames(); index++ ) {
-        vframeArrayElement* e = array->element(index);
-        e->print(tty);
-
-        /*
-          No printing yet.
-        array->vframe_at(index)->print_activation(count++);
-        // better as...
-        array->print_activation_for(index, count++);
-        */
-      }
-    }
   }
 #endif // PRODUCT
 
@@ -1332,13 +1348,14 @@
     // Whether the interpreter is producing MDO data or not, we also need
     // to use the MDO to detect hot deoptimization points and control
     // aggressive optimization.
+    bool inc_recompile_count = false;
+    ProfileData* pdata = NULL;
     if (ProfileTraps && update_trap_state && trap_mdo.not_null()) {
       assert(trap_mdo() == get_method_data(thread, trap_method, false), "sanity");
       uint this_trap_count = 0;
       bool maybe_prior_trap = false;
       bool maybe_prior_recompile = false;
-      ProfileData* pdata
-        = query_update_method_data(trap_mdo, trap_bci, reason,
+      pdata = query_update_method_data(trap_mdo, trap_bci, reason,
                                    //outputs:
                                    this_trap_count,
                                    maybe_prior_trap,
@@ -1374,18 +1391,7 @@
         // Detect repeated recompilation at the same BCI, and enforce a limit.
         if (make_not_entrant && maybe_prior_recompile) {
           // More than one recompile at this point.
-          trap_mdo->inc_overflow_recompile_count();
-          if (maybe_prior_trap
-              && ((uint)trap_mdo->overflow_recompile_count()
-                  > (uint)PerBytecodeRecompilationCutoff)) {
-            // Give up on the method containing the bad BCI.
-            if (trap_method() == nm->method()) {
-              make_not_compilable = true;
-            } else {
-              trap_method->set_not_compilable();
-              // But give grace to the enclosing nm->method().
-            }
-          }
+          inc_recompile_count = maybe_prior_trap;
         }
       } else {
         // For reasons which are not recorded per-bytecode, we simply
@@ -1412,7 +1418,17 @@
         reset_counters = true;
       }
 
-      if (make_not_entrant && pdata != NULL) {
+    }
+
+    // Take requested actions on the method:
+
+    // Recompile
+    if (make_not_entrant) {
+      if (!nm->make_not_entrant()) {
+        return; // the call did not change nmethod's state
+      }
+
+      if (pdata != NULL) {
         // Record the recompilation event, if any.
         int tstate0 = pdata->trap_state();
         int tstate1 = trap_state_set_recompiled(tstate0, true);
@@ -1421,7 +1437,19 @@
       }
     }
 
-    // Take requested actions on the method:
+    if (inc_recompile_count) {
+      trap_mdo->inc_overflow_recompile_count();
+      if ((uint)trap_mdo->overflow_recompile_count() >
+          (uint)PerBytecodeRecompilationCutoff) {
+        // Give up on the method containing the bad BCI.
+        if (trap_method() == nm->method()) {
+          make_not_compilable = true;
+        } else {
+          trap_method->set_not_compilable();
+          // But give grace to the enclosing nm->method().
+        }
+      }
+    }
 
     // Reset invocation counters
     if (reset_counters) {
@@ -1431,13 +1459,8 @@
         reset_invocation_counter(trap_scope);
     }
 
-    // Recompile
-    if (make_not_entrant) {
-      nm->make_not_entrant();
-    }
-
     // Give up compiling
-    if (make_not_compilable) {
+    if (make_not_compilable && !nm->method()->is_not_compilable()) {
       assert(make_not_entrant, "consistent");
       nm->method()->set_not_compilable();
     }
@@ -1510,9 +1533,11 @@
       if (tstate1 != tstate0)
         pdata->set_trap_state(tstate1);
     } else {
-      if (LogCompilation && xtty != NULL)
+      if (LogCompilation && xtty != NULL) {
+        ttyLocker ttyl;
         // Missing MDP?  Leave a small complaint in the log.
         xtty->elem("missing_mdp bci='%d'", trap_bci);
+      }
     }
   }
 
@@ -1666,13 +1691,15 @@
   "class_check",
   "array_check",
   "intrinsic",
+  "bimorphic",
   "unloaded",
   "uninitialized",
   "unreached",
   "unhandled",
   "constraint",
   "div0_check",
-  "age"
+  "age",
+  "predicate"
 };
 const char* Deoptimization::_trap_action_name[Action_LIMIT] = {
   // Note:  Keep this in sync. with enum DeoptAction.
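
The deoptimization.cpp hunks above reorder the trap bookkeeping around nm->make_not_entrant(): the state change is attempted first, and the profile recording and overflow-recompile accounting now run only if that call actually changed the nmethod's state. A minimal standalone sketch of that pattern follows; FakeNMethod and the counters are illustrative stand-ins, not HotSpot types.

#include <cstdio>

struct FakeNMethod {
  bool entrant = true;
  // Returns false when the method was already not entrant (a no-op call).
  bool make_not_entrant() {
    if (!entrant) return false;
    entrant = false;
    return true;
  }
};

int main() {
  FakeNMethod nm;
  int  overflow_recompile_count = 0;
  bool inc_recompile_count = true;   // decided while scanning profile data
  bool make_not_entrant    = true;   // requested action

  if (make_not_entrant) {
    if (!nm.make_not_entrant())
      return 0;                      // state unchanged: skip all bookkeeping
    // ... the recompile event would be recorded in the profile here ...
  }
  if (inc_recompile_count)
    ++overflow_recompile_count;      // counted only after a real transition
  std::printf("overflow_recompile_count = %d\n", overflow_recompile_count);
  return 0;
}
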
--- a/src/share/vm/runtime/deoptimization.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/deoptimization.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -33,12 +33,15 @@
   enum DeoptReason {
     Reason_many = -1,             // indicates presence of several reasons
     Reason_none = 0,              // indicates absence of a relevant deopt.
+    // Next 7 reasons are recorded per bytecode in DataLayout::trap_bits
     Reason_null_check,            // saw unexpected null or zero divisor (@bci)
     Reason_null_assert,           // saw unexpected non-null or non-zero (@bci)
     Reason_range_check,           // saw unexpected array index (@bci)
     Reason_class_check,           // saw unexpected object class (@bci)
     Reason_array_check,           // saw unexpected array class (aastore @bci)
     Reason_intrinsic,             // saw unexpected operand to intrinsic (@bci)
+    Reason_bimorphic,             // saw unexpected object class in bimorphic inlining (@bci)
+
     Reason_unloaded,              // unloaded class or constant pool entry
     Reason_uninitialized,         // bad class state (uninitialized)
     Reason_unreached,             // code is not reached, compiler
@@ -46,9 +49,10 @@
     Reason_constraint,            // arbitrary runtime constraint violated
     Reason_div0_check,            // a null_check due to division by zero
     Reason_age,                   // nmethod too old; tier threshold reached
+    Reason_predicate,             // compiler generated predicate failed
     Reason_LIMIT,
     // Note:  Keep this enum in sync. with _trap_reason_name.
-    Reason_RECORDED_LIMIT = Reason_unloaded   // some are not recorded per bc
+    Reason_RECORDED_LIMIT = Reason_bimorphic  // some are not recorded per bc
     // Note:  Reason_RECORDED_LIMIT should be < 8 to fit into 3 bits of
     // DataLayout::trap_bits.  This dependency is enforced indirectly
     // via asserts, to avoid excessive direct header-to-header dependencies.
@@ -278,7 +282,7 @@
                                        int trap_state);
 
   static bool reason_is_recorded_per_bytecode(DeoptReason reason) {
-    return reason > Reason_none && reason < Reason_RECORDED_LIMIT;
+    return reason > Reason_none && reason <= Reason_RECORDED_LIMIT;
   }
 
   static DeoptReason reason_recorded_per_bytecode_if_any(DeoptReason reason) {
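
With Reason_bimorphic added as the last reason that is recorded per bytecode, Reason_RECORDED_LIMIT moves from Reason_unloaded (used as an exclusive bound) to Reason_bimorphic (an inclusive one), which is why reason_is_recorded_per_bytecode changes from < to <=. A compilable sketch of the invariant, with a plain assert standing in for HotSpot's indirect DataLayout::trap_bits checks:

#include <cassert>

enum DeoptReason {
  Reason_none = 0,
  Reason_null_check, Reason_null_assert, Reason_range_check,
  Reason_class_check, Reason_array_check, Reason_intrinsic,
  Reason_bimorphic,                        // new: recorded per bytecode
  Reason_unloaded,                         // first non-recorded reason
  Reason_RECORDED_LIMIT = Reason_bimorphic
};

static bool reason_is_recorded_per_bytecode(DeoptReason r) {
  return r > Reason_none && r <= Reason_RECORDED_LIMIT;   // now inclusive
}

int main() {
  assert(Reason_RECORDED_LIMIT < 8);       // must fit in 3 trap bits
  assert(reason_is_recorded_per_bytecode(Reason_bimorphic));
  assert(!reason_is_recorded_per_bytecode(Reason_unloaded));
  return 0;
}
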
--- a/src/share/vm/runtime/frame.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/frame.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -107,7 +107,11 @@
 
 address frame::raw_pc() const {
   if (is_deoptimized_frame()) {
-    return ((nmethod*) cb())->deopt_handler_begin() - pc_return_offset;
+    nmethod* nm = cb()->as_nmethod_or_null();
+    if (nm->is_method_handle_return(pc()))
+      return nm->deopt_mh_handler_begin() - pc_return_offset;
+    else
+      return nm->deopt_handler_begin() - pc_return_offset;
   } else {
     return (pc() - pc_return_offset);
   }
@@ -269,10 +273,16 @@
   } // NeedsDeoptSuspend
 
 
-  address deopt = nm->deopt_handler_begin();
+  // If the call site is a MethodHandle call site, use the MH deopt
+  // handler.
+  address deopt = nm->is_method_handle_return(pc()) ?
+    nm->deopt_mh_handler_begin() :
+    nm->deopt_handler_begin();
+
   // Save the original pc before we patch in the new one
   nm->set_original_pc(this, pc());
   patch_pc(thread, deopt);
+
 #ifdef ASSERT
   {
     RegisterMap map(thread, false);
@@ -596,12 +606,12 @@
   for (BasicObjectLock* current = interpreter_frame_monitor_end();
        current < interpreter_frame_monitor_begin();
        current = next_monitor_in_interpreter_frame(current)) {
-    st->print_cr(" [ - obj ");
+    st->print(" - obj    [");
     current->obj()->print_value_on(st);
-    st->cr();
-    st->print_cr(" - lock ");
+    st->print_cr("]");
+    st->print(" - lock   [");
     current->lock()->print_on(st);
-    st->cr();
+    st->print_cr("]");
   }
   // monitor
   st->print_cr(" - monitor[" INTPTR_FORMAT "]", interpreter_frame_monitor_begin());
@@ -769,9 +779,9 @@
 
 class InterpretedArgumentOopFinder: public SignatureInfo {
  private:
-  OopClosure* _f;      // Closure to invoke
-  int    _offset;      // TOS-relative offset, decremented with each argument
-  bool   _is_static;   // true if the callee is a static method
+  OopClosure* _f;        // Closure to invoke
+  int    _offset;        // TOS-relative offset, decremented with each argument
+  bool   _has_receiver;  // true if the callee has a receiver
   frame* _fr;
 
   void set(int size, BasicType type) {
@@ -786,9 +796,9 @@
   }
 
  public:
-  InterpretedArgumentOopFinder(symbolHandle signature, bool is_static, frame* fr, OopClosure* f) : SignatureInfo(signature) {
+  InterpretedArgumentOopFinder(symbolHandle signature, bool has_receiver, frame* fr, OopClosure* f) : SignatureInfo(signature), _has_receiver(has_receiver) {
     // compute size of arguments
-    int args_size = ArgumentSizeComputer(signature).size() + (is_static ? 0 : 1);
+    int args_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0);
     assert(!fr->is_interpreted_frame() ||
            args_size <= fr->interpreter_frame_expression_stack_size(),
             "args cannot be on stack anymore");
@@ -796,11 +806,10 @@
     _f         = f;
     _fr        = fr;
     _offset    = args_size;
-    _is_static = is_static;
   }
 
   void oops_do() {
-    if (!_is_static) {
+    if (_has_receiver) {
       --_offset;
       oop_offset_do();
     }
@@ -912,7 +921,7 @@
   int max_locals = m->is_native() ? m->size_of_parameters() : m->max_locals();
 
   symbolHandle signature;
-  bool is_static = false;
+  bool has_receiver = false;
 
   // Process a callee's arguments if we are at a call site
   // (i.e., if we are at an invoke bytecode)
@@ -922,7 +931,7 @@
     Bytecode_invoke *call = Bytecode_invoke_at_check(m, bci);
     if (call != NULL) {
       signature = symbolHandle(thread, call->signature());
-      is_static = call->is_invokestatic();
+      has_receiver = call->has_receiver();
       if (map->include_argument_oops() &&
           interpreter_frame_expression_stack_size() > 0) {
         ResourceMark rm(thread);  // is this right ???
@@ -936,7 +945,7 @@
         //       code in the interpreter calls a blocking runtime
         //       routine which can cause this code to be executed).
         //       (was bug gri 7/27/98)
-        oops_interpreted_arguments_do(signature, is_static, f);
+        oops_interpreted_arguments_do(signature, has_receiver, f);
       }
     }
   }
@@ -950,7 +959,7 @@
     mask = &oopmap_mask;
 #endif // ASSERT
     oops_interpreted_locals_do(f, max_locals, mask);
-    oops_interpreted_expressions_do(f, signature, is_static,
+    oops_interpreted_expressions_do(f, signature, has_receiver,
                                     m->max_stack(),
                                     max_locals, mask);
   } else {
@@ -992,7 +1001,7 @@
 
 void frame::oops_interpreted_expressions_do(OopClosure *f,
                                       symbolHandle signature,
-                                      bool is_static,
+                                      bool has_receiver,
                                       int max_stack,
                                       int max_locals,
                                       InterpreterOopMap *mask) {
@@ -1005,7 +1014,7 @@
   // arguments in callee's locals.
   int args_size = 0;
   if (!signature.is_null()) {
-    args_size = ArgumentSizeComputer(signature).size() + (is_static ? 0 : 1);
+    args_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0);
   }
 
   intptr_t *tos_addr = interpreter_frame_tos_at(args_size);
@@ -1038,8 +1047,8 @@
   }
 }
 
-void frame::oops_interpreted_arguments_do(symbolHandle signature, bool is_static, OopClosure* f) {
-  InterpretedArgumentOopFinder finder(signature, is_static, this, f);
+void frame::oops_interpreted_arguments_do(symbolHandle signature, bool has_receiver, OopClosure* f) {
+  InterpretedArgumentOopFinder finder(signature, has_receiver, this, f);
   finder.oops_do();
 }
 
@@ -1066,8 +1075,8 @@
 class CompiledArgumentOopFinder: public SignatureInfo {
  protected:
   OopClosure*     _f;
-  int             _offset;      // the current offset, incremented with each argument
-  bool            _is_static;   // true if the callee is a static method
+  int             _offset;        // the current offset, incremented with each argument
+  bool            _has_receiver;  // true if the callee has a receiver
   frame           _fr;
   RegisterMap*    _reg_map;
   int             _arg_size;
@@ -1087,24 +1096,24 @@
   }
 
  public:
-  CompiledArgumentOopFinder(symbolHandle signature, bool is_static, OopClosure* f, frame fr,  const RegisterMap* reg_map)
+  CompiledArgumentOopFinder(symbolHandle signature, bool has_receiver, OopClosure* f, frame fr,  const RegisterMap* reg_map)
     : SignatureInfo(signature) {
 
     // initialize CompiledArgumentOopFinder
     _f         = f;
     _offset    = 0;
-    _is_static = is_static;
+    _has_receiver = has_receiver;
     _fr        = fr;
     _reg_map   = (RegisterMap*)reg_map;
-    _arg_size  = ArgumentSizeComputer(signature).size() + (is_static ? 0 : 1);
+    _arg_size  = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0);
 
     int arg_size;
-    _regs = SharedRuntime::find_callee_arguments(signature(), is_static, &arg_size);
+    _regs = SharedRuntime::find_callee_arguments(signature(), has_receiver, &arg_size);
     assert(arg_size == _arg_size, "wrong arg size");
   }
 
   void oops_do() {
-    if (!_is_static) {
+    if (_has_receiver) {
       handle_oop_offset();
       _offset++;
     }
@@ -1112,9 +1121,9 @@
   }
 };
 
-void frame::oops_compiled_arguments_do(symbolHandle signature, bool is_static, const RegisterMap* reg_map, OopClosure* f) {
+void frame::oops_compiled_arguments_do(symbolHandle signature, bool has_receiver, const RegisterMap* reg_map, OopClosure* f) {
   ResourceMark rm;
-  CompiledArgumentOopFinder finder(signature, is_static, f, *this, reg_map);
+  CompiledArgumentOopFinder finder(signature, has_receiver, f, *this, reg_map);
   finder.oops_do();
 }
 
@@ -1190,9 +1199,19 @@
 
 
 void frame::oops_do_internal(OopClosure* f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache) {
-         if (is_interpreted_frame())    { oops_interpreted_do(f, map, use_interpreter_oop_map_cache);
-  } else if (is_entry_frame())          { oops_entry_do      (f, map);
-  } else if (CodeCache::contains(pc())) { oops_code_blob_do  (f, cf, map);
+#ifndef PRODUCT
+  // simulate GC crash here to dump java thread in error report
+  if (CrashGCForDumpingJavaThread) {
+    char *t = NULL;
+    *t = 'c';
+  }
+#endif
+  if (is_interpreted_frame()) {
+    oops_interpreted_do(f, map, use_interpreter_oop_map_cache);
+  } else if (is_entry_frame()) {
+    oops_entry_do(f, map);
+  } else if (CodeCache::contains(pc())) {
+    oops_code_blob_do(f, cf, map);
   } else {
     ShouldNotReachHere();
   }
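
The is_static -> has_receiver renames above invert the question the oop finders ask: not whether the callee is static, but whether a receiver occupies an extra stack slot, which also covers invoke modes (invokedynamic, for instance) that pass no receiver without being invokestatic. A self-contained sketch of the slot arithmetic; arg_slots is an illustrative stand-in for ArgumentSizeComputer:

#include <cassert>
#include <string>

// Count JVM argument slots in a method signature such as
// "(IJLjava/lang/String;)V": long and double take two slots, references
// (including arrays) and everything else take one.
static int arg_slots(const std::string& sig) {
  int slots = 0;
  size_t i = 1;                                  // skip '('
  while (sig[i] != ')') {
    bool array = false;
    while (sig[i] == '[') { array = true; ++i; } // an array is one reference
    if (sig[i] == 'L') { while (sig[i] != ';') ++i; }
    slots += (!array && (sig[i] == 'J' || sig[i] == 'D')) ? 2 : 1;
    ++i;
  }
  return slots;
}

int main() {
  bool has_receiver = true;  // e.g. invokevirtual; false for invokestatic
  int args_size = arg_slots("(IJLjava/lang/String;)V") + (has_receiver ? 1 : 0);
  assert(args_size == 1 + 2 + 1 + 1);            // int + long + ref + receiver
  assert(arg_slots("([J)V") == 1);               // long[] is a single slot
  return 0;
}
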
--- a/src/share/vm/runtime/frame.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/frame.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -371,7 +371,7 @@
   oop* oopmapreg_to_location(VMReg reg, const RegisterMap* regmap) const;
 
   // Oops-do's
-  void oops_compiled_arguments_do(symbolHandle signature, bool is_static, const RegisterMap* reg_map, OopClosure* f);
+  void oops_compiled_arguments_do(symbolHandle signature, bool has_receiver, const RegisterMap* reg_map, OopClosure* f);
   void oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool query_oop_map_cache = true);
 
  private:
@@ -379,9 +379,9 @@
                                  int max_locals,
                                  InterpreterOopMap *mask);
   void oops_interpreted_expressions_do(OopClosure *f, symbolHandle signature,
-                                 bool is_static, int max_stack, int max_locals,
+                                 bool has_receiver, int max_stack, int max_locals,
                                  InterpreterOopMap *mask);
-  void oops_interpreted_arguments_do(symbolHandle signature, bool is_static, OopClosure* f);
+  void oops_interpreted_arguments_do(symbolHandle signature, bool has_receiver, OopClosure* f);
 
   // Iteration of oops
   void oops_do_internal(OopClosure* f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache);
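
The oops_do_internal hunk above wires in CrashGCForDumpingJavaThread, a notproduct switch (declared later in this change, in globals.hpp) that deliberately writes through a null pointer so the error reporter can be exercised with a Java thread on the stack. A standalone sketch of the same trick, with a signal handler standing in for the VM's error reporter (demo only; fprintf is not strictly async-signal-safe):

#include <csignal>
#include <cstdio>
#include <cstdlib>

static void handler(int sig) {
  std::fprintf(stderr, "simulated error report for signal %d\n", sig);
  std::_Exit(1);
}

int main(int argc, char**) {
  std::signal(SIGSEGV, handler);
  bool crash_for_dump = (argc > 1);     // stands in for the VM flag
  if (crash_for_dump) {
    char* volatile t = 0;               // volatile defeats optimization
    *t = 'c';                           // intentional fault, as in the patch
  }
  std::puts("no crash requested");
  return 0;
}
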
--- a/src/share/vm/runtime/globals.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/globals.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -46,7 +46,8 @@
 bool Flag::is_unlocked() const {
   if (strcmp(kind, "{diagnostic}") == 0) {
     return UnlockDiagnosticVMOptions;
-  } else if (strcmp(kind, "{experimental}") == 0) {
+  } else if (strcmp(kind, "{experimental}") == 0 ||
+             strcmp(kind, "{C2 experimental}") == 0) {
     return UnlockExperimentalVMOptions;
   } else {
     return true;
@@ -69,9 +70,10 @@
 
 void Flag::print_on(outputStream* st) {
   st->print("%5s %-35s %c= ", type, name, (origin != DEFAULT ? ':' : ' '));
-  if (is_bool())  st->print("%-16s", get_bool() ? "true" : "false");
-  if (is_intx())  st->print("%-16ld", get_intx());
-  if (is_uintx()) st->print("%-16lu", get_uintx());
+  if (is_bool())     st->print("%-16s", get_bool() ? "true" : "false");
+  if (is_intx())     st->print("%-16ld", get_intx());
+  if (is_uintx())    st->print("%-16lu", get_uintx());
+  if (is_uint64_t()) st->print("%-16lu", get_uint64_t());
   if (is_ccstr()) {
     const char* cp = get_ccstr();
     if (cp != NULL) {
@@ -100,6 +102,8 @@
     st->print("-XX:%s=" INTX_FORMAT, name, get_intx());
   } else if (is_uintx()) {
     st->print("-XX:%s=" UINTX_FORMAT, name, get_uintx());
+  } else if (is_uint64_t()) {
+    st->print("-XX:%s=" UINT64_FORMAT, name, get_uint64_t());
   } else if (is_ccstr()) {
     st->print("-XX:%s=", name);
     const char* cp = get_ccstr();
@@ -166,6 +170,7 @@
 #define C2_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C2 product}", DEFAULT },
 #define C2_PD_PRODUCT_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, "{C2 pd product}", DEFAULT },
 #define C2_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C2 diagnostic}", DEFAULT },
+#define C2_EXPERIMENTAL_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C2 experimental}", DEFAULT },
 #ifdef PRODUCT
   #define C2_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
   #define C2_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     /* flag is constant */
@@ -187,7 +192,7 @@
  C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT)
 #endif
 #ifdef COMPILER2
- C2_FLAGS(C2_DEVELOP_FLAG_STRUCT, C2_PD_DEVELOP_FLAG_STRUCT, C2_PRODUCT_FLAG_STRUCT, C2_PD_PRODUCT_FLAG_STRUCT, C2_DIAGNOSTIC_FLAG_STRUCT, C2_NOTPRODUCT_FLAG_STRUCT)
+ C2_FLAGS(C2_DEVELOP_FLAG_STRUCT, C2_PD_DEVELOP_FLAG_STRUCT, C2_PRODUCT_FLAG_STRUCT, C2_PD_PRODUCT_FLAG_STRUCT, C2_DIAGNOSTIC_FLAG_STRUCT, C2_EXPERIMENTAL_FLAG_STRUCT, C2_NOTPRODUCT_FLAG_STRUCT)
 #endif
  {0, NULL, NULL}
 };
@@ -324,6 +329,32 @@
   faddr->origin = origin;
 }
 
+bool CommandLineFlags::uint64_tAt(char* name, size_t len, uint64_t* value) {
+  Flag* result = Flag::find_flag(name, len);
+  if (result == NULL) return false;
+  if (!result->is_uint64_t()) return false;
+  *value = result->get_uint64_t();
+  return true;
+}
+
+bool CommandLineFlags::uint64_tAtPut(char* name, size_t len, uint64_t* value, FlagValueOrigin origin) {
+  Flag* result = Flag::find_flag(name, len);
+  if (result == NULL) return false;
+  if (!result->is_uint64_t()) return false;
+  uint64_t old_value = result->get_uint64_t();
+  result->set_uint64_t(*value);
+  *value = old_value;
+  result->origin = origin;
+  return true;
+}
+
+void CommandLineFlagsEx::uint64_tAtPut(CommandLineFlagWithType flag, uint64_t value, FlagValueOrigin origin) {
+  Flag* faddr = address_of_flag(flag);
+  guarantee(faddr != NULL && faddr->is_uint64_t(), "wrong flag type");
+  faddr->set_uint64_t(value);
+  faddr->origin = origin;
+}
+
 bool CommandLineFlags::doubleAt(char* name, size_t len, double* value) {
   Flag* result = Flag::find_flag(name, len);
   if (result == NULL) return false;
@@ -437,6 +468,8 @@
   assert(Arguments::check_vm_args_consistency(), "Some flag settings conflict");
 }
 
+#endif // PRODUCT
+
 void CommandLineFlags::printFlags() {
   // Print the flags sorted by name
   // note: this method is called before the thread structure is in place
@@ -462,5 +495,3 @@
   }
   FREE_C_HEAP_ARRAY(Flag*, array);
 }
-
-#endif
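
The globals changes teach the flag machinery a new primitive type, uint64_t, by following the existing uintx pattern exactly: a type-string test on Flag, typed get/set accessors, and At/AtPut lookups where the put hands the old value back through the in/out parameter. A compilable miniature of that pattern; the table, lookup, and flag shown are illustrative, not the real declarations:

#include <cassert>
#include <cstdint>
#include <cstring>

struct Flag {
  const char* type;
  const char* name;
  void*       addr;
  bool is_uint64_t() const          { return std::strcmp(type, "uint64_t") == 0; }
  uint64_t get_uint64_t() const     { return *(uint64_t*)addr; }
  void set_uint64_t(uint64_t value) { *(uint64_t*)addr = value; }
};

static uint64_t MaxRAMDemo = 1ULL << 30;   // hypothetical flag storage

static Flag flagTable[] = {
  { "uint64_t", "MaxRAMDemo", &MaxRAMDemo },
  { 0, 0, 0 }
};

static Flag* find_flag(const char* name) {
  for (Flag* f = flagTable; f->name != 0; f++)
    if (std::strcmp(f->name, name) == 0) return f;
  return 0;
}

// Mirrors the AtPut shape: on success the old value comes back via *value.
static bool uint64_tAtPut(const char* name, uint64_t* value) {
  Flag* result = find_flag(name);
  if (result == 0) return false;
  if (!result->is_uint64_t()) return false;
  uint64_t old_value = result->get_uint64_t();
  result->set_uint64_t(*value);
  *value = old_value;
  return true;
}

int main() {
  uint64_t v = 2ULL << 30;
  assert(uint64_tAtPut("MaxRAMDemo", &v));
  assert(v == (1ULL << 30) && MaxRAMDemo == (2ULL << 30));
  return 0;
}
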
--- a/src/share/vm/runtime/globals.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/globals.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -47,8 +47,8 @@
 define_pd_global(intx, OnStackReplacePercentage,     0);
 define_pd_global(bool, ResizeTLAB,                   false);
 define_pd_global(intx, FreqInlineSize,               0);
+define_pd_global(intx, InlineSmallCode,              0);
 define_pd_global(intx, NewSizeThreadIncrease,        4*K);
-define_pd_global(intx, NewRatio,                     4);
 define_pd_global(intx, InlineClassNatives,           true);
 define_pd_global(intx, InlineUnsafeOps,              true);
 define_pd_global(intx, InitialCodeCacheSize,         160*K);
@@ -58,7 +58,7 @@
 define_pd_global(uintx,PermSize,    ScaleForWordSize(4*M));
 define_pd_global(uintx,MaxPermSize, ScaleForWordSize(64*M));
 define_pd_global(bool, NeverActAsServerClassMachine, true);
-define_pd_global(uintx, DefaultMaxRAM,               1*G);
+define_pd_global(uint64_t, MaxRAM,                   1ULL*G);
 #define CI_COMPILER_COUNT 0
 #else
 
@@ -113,6 +113,10 @@
   uintx get_uintx() const     { return *((uintx*) addr); }
   void set_uintx(uintx value) { *((uintx*) addr) = value; }
 
+  bool is_uint64_t() const          { return strcmp(type, "uint64_t") == 0; }
+  uint64_t get_uint64_t() const     { return *((uint64_t*) addr); }
+  void set_uint64_t(uint64_t value) { *((uint64_t*) addr) = value; }
+
   bool is_double() const        { return strcmp(type, "double") == 0; }
   double get_double() const     { return *((double*) addr); }
   void set_double(double value) { *((double*) addr) = value; }
@@ -188,6 +192,11 @@
   static bool uintxAtPut(char* name, size_t len, uintx* value, FlagValueOrigin origin);
   static bool uintxAtPut(char* name, uintx* value, FlagValueOrigin origin) { return uintxAtPut(name, strlen(name), value, origin); }
 
+  static bool uint64_tAt(char* name, size_t len, uint64_t* value);
+  static bool uint64_tAt(char* name, uint64_t* value) { return uint64_tAt(name, strlen(name), value); }
+  static bool uint64_tAtPut(char* name, size_t len, uint64_t* value, FlagValueOrigin origin);
+  static bool uint64_tAtPut(char* name, uint64_t* value, FlagValueOrigin origin) { return uint64_tAtPut(name, strlen(name), value, origin); }
+
   static bool doubleAt(char* name, size_t len, double* value);
   static bool doubleAt(char* name, double* value)    { return doubleAt(name, strlen(name), value); }
   static bool doubleAtPut(char* name, size_t len, double* value, FlagValueOrigin origin);
@@ -202,7 +211,7 @@
   static bool wasSetOnCmdline(const char* name, bool* value);
   static void printSetFlags();
 
-  static void printFlags() PRODUCT_RETURN;
+  static void printFlags();
 
   static void verify() PRODUCT_RETURN;
 };
@@ -318,9 +327,6 @@
   product(bool, UseMembar, false,                                           \
           "(Unstable) Issues membars on thread state transitions")          \
                                                                             \
-  product(bool, PrintCommandLineFlags, false,                               \
-          "Prints flags that appeared on the command line")                 \
-                                                                            \
   diagnostic(bool, UnlockDiagnosticVMOptions, trueInDebug,                  \
           "Enable normal processing of flags relating to field diagnostics")\
                                                                             \
@@ -736,6 +742,9 @@
   diagnostic(bool, PrintAdapterHandlers, false,                             \
           "Print code generated for i2c/c2i adapters")                      \
                                                                             \
+  develop(bool, VerifyAdapterSharing, false,                                \
+          "Verify that the code for shared adapters is the equivalent")     \
+                                                                            \
   diagnostic(bool, PrintAssembly, false,                                    \
           "Print assembly code (using external disassembler.so)")           \
                                                                             \
@@ -785,7 +794,7 @@
   product(bool, ProfilerRecordPC, false,                                    \
           "Collects tick for each 16 byte interval of compiled code")       \
                                                                             \
-  product(bool, ProfileVM,  false,                                          \
+  product(bool, ProfileVM, false,                                           \
           "Profiles ticks that fall within VM (either in the VM Thread "    \
           "or VM code called through stubs)")                               \
                                                                             \
@@ -815,7 +824,7 @@
                                                                             \
   product(bool, RegisterFinalizersAtInit, true,                             \
           "Register finalizable objects at end of Object.<init> or "        \
-          "after allocation.")                                              \
+          "after allocation")                                               \
                                                                             \
   develop(bool, RegisterReferences, true,                                   \
           "Tells whether the VM should register soft/weak/final/phantom "   \
@@ -862,14 +871,14 @@
   product(bool, AlwaysLockClassLoader, false,                               \
           "Require the VM to acquire the class loader lock before calling " \
           "loadClass() even for class loaders registering "                 \
-          "as parallel capable. Default false. ")                           \
+          "as parallel capable")                                            \
                                                                             \
   product(bool, AllowParallelDefineClass, false,                            \
           "Allow parallel defineClass requests for class loaders "          \
-          "registering as parallel capable. Default false")                 \
+          "registering as parallel capable")                                \
                                                                             \
   product(bool, MustCallLoadClassInternal, false,                           \
-          "Call loadClassInternal() rather than loadClass().Default false") \
+          "Call loadClassInternal() rather than loadClass()")               \
                                                                             \
   product_pd(bool, DontYieldALot,                                           \
           "Throw away obvious excess yield calls (for SOLARIS only)")       \
@@ -921,9 +930,9 @@
          "(Unstable, Linux-specific)"                                       \
          " avoid NPTL-FUTEX hang pthread_cond_timedwait" )                  \
                                                                             \
-  product(bool, FilterSpuriousWakeups , true,                               \
-          "Prevent spurious or premature wakeups from object.wait"              \
-          "(Solaris only)")                                                     \
+  product(bool, FilterSpuriousWakeups, true,                                \
+          "Prevent spurious or premature wakeups from object.wait "         \
+          "(Solaris only)")                                                 \
                                                                             \
   product(intx, NativeMonitorTimeout, -1, "(Unstable)" )                    \
   product(intx, NativeMonitorFlags, 0, "(Unstable)" )                       \
@@ -973,7 +982,7 @@
                                                                             \
   product(bool, UseAltSigs, false,                                          \
           "Use alternate signals instead of SIGUSR1 & SIGUSR2 for VM "      \
-          "internal signals. (Solaris only)")                               \
+          "internal signals (Solaris only)")                                \
                                                                             \
   product(bool, UseSpinning, false,                                         \
           "Use spinning in monitor inflation and before entry")             \
@@ -1195,7 +1204,7 @@
   product(bool, UseSerialGC, false,                                         \
           "Use the serial garbage collector")                               \
                                                                             \
-  experimental(bool, UseG1GC, false,                                        \
+  product(bool, UseG1GC, false,                                             \
           "Use the Garbage-First garbage collector")                        \
                                                                             \
   product(bool, UseParallelGC, false,                                       \
@@ -1265,12 +1274,12 @@
           "Always tenure objects in eden. (ParallelGC only)")               \
                                                                             \
   product(bool, NeverTenure, false,                                         \
-          "Never tenure objects in eden, May tenure on overflow"            \
-          " (ParallelGC only)")                                             \
+          "Never tenure objects in eden, May tenure on overflow "           \
+          "(ParallelGC only)")                                              \
                                                                             \
   product(bool, ScavengeBeforeFullGC, true,                                 \
-          "Scavenge youngest generation before each full GC,"               \
-          " used with UseParallelGC")                                       \
+          "Scavenge youngest generation before each full GC, "              \
+          "used with UseParallelGC")                                        \
                                                                             \
   develop(bool, ScavengeWithObjectsInToSpace, false,                        \
           "Allow scavenges to occur when to_space contains objects.")       \
@@ -1283,9 +1292,9 @@
           " (effective only when UseConcMarkSweepGC)")                      \
                                                                             \
   product(bool, ExplicitGCInvokesConcurrentAndUnloadsClasses, false,        \
-          "A System.gc() request invokes a concurrent collection and"       \
-          " also unloads classes during such a concurrent gc cycle  "       \
-          " (effective only when UseConcMarkSweepGC)")                      \
+          "A System.gc() request invokes a concurrent collection and "      \
+          "also unloads classes during such a concurrent gc cycle "         \
+          "(effective only when UseConcMarkSweepGC)")                       \
                                                                             \
   develop(bool, UseCMSAdaptiveFreeLists, true,                              \
           "Use Adaptive Free Lists in the CMS generation")                  \
@@ -1340,26 +1349,62 @@
           "Whether we should simulate work queue overflow in ParNew")       \
                                                                             \
   notproduct(uintx, ParGCWorkQueueOverflowInterval, 1000,                   \
-          "An `interval' counter that determines how frequently"            \
-          " we simulate overflow; a smaller number increases frequency")    \
+          "An `interval' counter that determines how frequently "           \
+          "we simulate overflow; a smaller number increases frequency")     \
                                                                             \
   product(uintx, ParGCDesiredObjsFromOverflowList, 20,                      \
           "The desired number of objects to claim from the overflow list")  \
                                                                             \
-  product(uintx, CMSParPromoteBlocksToClaim, 50,                            \
+  product(uintx, CMSParPromoteBlocksToClaim, 16,                            \
           "Number of blocks to attempt to claim when refilling CMS LAB for "\
           "parallel GC.")                                                   \
                                                                             \
+  product(uintx, OldPLABWeight, 50,                                         \
+          "Percentage (0-100) used to weight the current sample when"       \
+          "computing exponentially decaying average for resizing CMSParPromoteBlocksToClaim.") \
+                                                                            \
+  product(bool, ResizeOldPLAB, true,                                        \
+          "Dynamically resize (old gen) promotion labs")                    \
+                                                                            \
+  product(bool, PrintOldPLAB, false,                                        \
+          "Print (old gen) promotion labs sizing decisions")                \
+                                                                            \
+  product(uintx, CMSOldPLABMin, 16,                                         \
+          "Min size of CMS gen promotion lab caches per worker per blksize")\
+                                                                            \
+  product(uintx, CMSOldPLABMax, 1024,                                       \
+          "Max size of CMS gen promotion lab caches per worker per blksize")\
+                                                                            \
+  product(uintx, CMSOldPLABNumRefills, 4,                                   \
+          "Nominal number of refills of CMS gen promotion lab cache"        \
+          " per worker per block size")                                     \
+                                                                            \
+  product(bool, CMSOldPLABResizeQuicker, false,                             \
+          "Whether to react on-the-fly during a scavenge to a sudden"       \
+          " change in block demand rate")                                   \
+                                                                            \
+  product(uintx, CMSOldPLABToleranceFactor, 4,                              \
+          "The tolerance of the phase-change detector for on-the-fly"       \
+          " PLAB resizing during a scavenge")                               \
+                                                                            \
+  product(uintx, CMSOldPLABReactivityFactor, 2,                             \
+          "The gain in the feedback loop for on-the-fly PLAB resizing"      \
+          " during a scavenge")                                             \
+                                                                            \
+  product(uintx, CMSOldPLABReactivityCeiling, 10,                           \
+          "The clamping of the gain in the feedback loop for on-the-fly"    \
+          " PLAB resizing during a scavenge")                               \
+                                                                            \
   product(bool, AlwaysPreTouch, false,                                      \
           "It forces all freshly committed pages to be pre-touched.")       \
                                                                             \
   product(bool, CMSUseOldDefaults, false,                                   \
-          "A flag temporarily  introduced to allow reverting to some older" \
-          "default settings; older as of 6.0 ")                             \
+          "A flag temporarily introduced to allow reverting to some "       \
+          "older default settings; older as of 6.0")                        \
                                                                             \
   product(intx, CMSYoungGenPerWorker, 16*M,                                 \
           "The amount of young gen chosen by default per GC worker "        \
-          "thread available ")                                              \
+          "thread available")                                               \
                                                                             \
   product(bool, GCOverheadReporting, false,                                 \
          "Enables the GC overhead reporting facility")                      \
@@ -1380,43 +1425,71 @@
           "automatically adjusted")                                         \
                                                                             \
   product(uintx, CMSIncrementalDutyCycleMin, 0,                             \
-          "Lower bound on the duty cycle when CMSIncrementalPacing is"      \
-          "enabled (a percentage, 0-100).")                                 \
+          "Lower bound on the duty cycle when CMSIncrementalPacing is "     \
+          "enabled (a percentage, 0-100)")                                  \
                                                                             \
   product(uintx, CMSIncrementalSafetyFactor, 10,                            \
-          "Percentage (0-100) used to add conservatism when computing the"  \
-          "duty cycle.")                                                    \
+          "Percentage (0-100) used to add conservatism when computing the " \
+          "duty cycle")                                                     \
                                                                             \
   product(uintx, CMSIncrementalOffset, 0,                                   \
           "Percentage (0-100) by which the CMS incremental mode duty cycle" \
-          "is shifted to the right within the period between young GCs")    \
-                                                                            \
-  product(uintx, CMSExpAvgFactor, 25,                                       \
+          " is shifted to the right within the period between young GCs")   \
+                                                                            \
+  product(uintx, CMSExpAvgFactor, 50,                                       \
           "Percentage (0-100) used to weight the current sample when"       \
           "computing exponential averages for CMS statistics.")             \
                                                                             \
-  product(uintx, CMS_FLSWeight, 50,                                         \
+  product(uintx, CMS_FLSWeight, 75,                                         \
           "Percentage (0-100) used to weight the current sample when"       \
           "computing exponentially decating averages for CMS FLS statistics.") \
                                                                             \
-  product(uintx, CMS_FLSPadding, 2,                                         \
+  product(uintx, CMS_FLSPadding, 1,                                         \
           "The multiple of deviation from mean to use for buffering"        \
           "against volatility in free list demand.")                        \
                                                                             \
   product(uintx, FLSCoalescePolicy, 2,                                      \
           "CMS: Aggression level for coalescing, increasing from 0 to 4")   \
                                                                             \
-  product(uintx, CMS_SweepWeight, 50,                                       \
-          "Percentage (0-100) used to weight the current sample when"       \
-          "computing exponentially decaying average for inter-sweep duration.") \
-                                                                            \
-  product(uintx, CMS_SweepPadding, 2,                                       \
-          "The multiple of deviation from mean to use for buffering"        \
+  product(bool, FLSAlwaysCoalesceLarge, false,                              \
+          "CMS: Larger free blocks are always available for coalescing")    \
+                                                                            \
+  product(double, FLSLargestBlockCoalesceProximity, 0.99,                   \
+          "CMS: the smaller the percentage the greater the coalition force")\
+                                                                            \
+  product(double, CMSSmallCoalSurplusPercent, 1.05,                         \
+          "CMS: the factor by which to inflate estimated demand of small"   \
+          " block sizes to prevent coalescing with an adjoining block")     \
+                                                                            \
+  product(double, CMSLargeCoalSurplusPercent, 0.95,                         \
+          "CMS: the factor by which to inflate estimated demand of large"   \
+          " block sizes to prevent coalescing with an adjoining block")     \
+                                                                            \
+  product(double, CMSSmallSplitSurplusPercent, 1.10,                        \
+          "CMS: the factor by which to inflate estimated demand of small"   \
+          " block sizes to prevent splitting to supply demand for smaller"  \
+          " blocks")                                                        \
+                                                                            \
+  product(double, CMSLargeSplitSurplusPercent, 1.00,                        \
+          "CMS: the factor by which to inflate estimated demand of large"   \
+          " block sizes to prevent splitting to supply demand for smaller"  \
+          " blocks")                                                        \
+                                                                            \
+  product(bool, CMSExtrapolateSweep, false,                                 \
+          "CMS: cushion for block demand during sweep")                     \
+                                                                            \
+  product(uintx, CMS_SweepWeight, 75,                                       \
+          "Percentage (0-100) used to weight the current sample when "      \
+          "computing exponentially decaying average for inter-sweep "       \
+          "duration")                                                       \
+                                                                            \
+  product(uintx, CMS_SweepPadding, 1,                                       \
+          "The multiple of deviation from mean to use for buffering "       \
           "against volatility in inter-sweep duration.")                    \
                                                                             \
   product(uintx, CMS_SweepTimerThresholdMillis, 10,                         \
           "Skip block flux-rate sampling for an epoch unless inter-sweep "  \
-          " duration exceeds this threhold in milliseconds")                \
+          "duration exceeds this threhold in milliseconds")                 \
                                                                             \
   develop(bool, CMSTraceIncrementalMode, false,                             \
           "Trace CMS incremental mode")                                     \
@@ -1449,6 +1522,13 @@
   product(uintx, CMSIndexedFreeListReplenish, 4,                            \
           "Replenish and indexed free list with this number of chunks")     \
                                                                             \
+  product(bool, CMSReplenishIntermediate, true,                             \
+          "Replenish all intermediate free-list caches")                    \
+                                                                            \
+  product(bool, CMSSplitIndexedFreeListBlocks, true,                        \
+          "When satisfying batched demand, splot blocks from the "          \
+          "IndexedFreeList whose size is a multiple of requested size")     \
+                                                                            \
   product(bool, CMSLoopWarn, false,                                         \
           "Warn in case of excessive CMS looping")                          \
                                                                             \
@@ -1583,6 +1663,18 @@
           "Bitmap operations should process at most this many bits"         \
           "between yields")                                                 \
                                                                             \
+  product(bool, CMSDumpAtPromotionFailure, false,                           \
+          "Dump useful information about the state of the CMS old "         \
+          " generation upon a promotion failure.")                          \
+                                                                            \
+  product(bool, CMSPrintChunksInDump, false,                                \
+          "In a dump enabled by CMSDumpAtPromotionFailure, include "        \
+          " more detailed information about the free chunks.")              \
+                                                                            \
+  product(bool, CMSPrintObjectsInDump, false,                               \
+          "In a dump enabled by CMSDumpAtPromotionFailure, include "        \
+          " more detailed information about the allocated objects.")        \
+                                                                            \
   diagnostic(bool, FLSVerifyAllHeapReferences, false,                       \
           "Verify that all refs across the FLS boundary "                   \
           " are to valid objects")                                          \
@@ -1617,35 +1709,36 @@
                                                                             \
   product(intx, CMSTriggerRatio, 80,                                        \
           "Percentage of MinHeapFreeRatio in CMS generation that is "       \
-          "  allocated before a CMS collection cycle commences")            \
+          "allocated before a CMS collection cycle commences")              \
                                                                             \
   product(intx, CMSTriggerPermRatio, 80,                                    \
-          "Percentage of MinHeapFreeRatio in the CMS perm generation that"  \
-          "  is allocated before a CMS collection cycle commences, that  "  \
-          "  also collects the perm generation")                            \
+          "Percentage of MinHeapFreeRatio in the CMS perm generation that " \
+          "is allocated before a CMS collection cycle commences, that "     \
+          "also collects the perm generation")                              \
                                                                             \
   product(uintx, CMSBootstrapOccupancy, 50,                                 \
           "Percentage CMS generation occupancy at which to "                \
-          " initiate CMS collection for bootstrapping collection stats")    \
+          "initiate CMS collection for bootstrapping collection stats")     \
                                                                             \
   product(intx, CMSInitiatingOccupancyFraction, -1,                         \
           "Percentage CMS generation occupancy to start a CMS collection "  \
-          " cycle (A negative value means that CMSTriggerRatio is used)")   \
+          "cycle. A negative value means that CMSTriggerRatio is used")     \
                                                                             \
   product(intx, CMSInitiatingPermOccupancyFraction, -1,                     \
-          "Percentage CMS perm generation occupancy to start a CMScollection"\
-          " cycle (A negative value means that CMSTriggerPermRatio is used)")\
+          "Percentage CMS perm generation occupancy to start a "            \
+          "CMScollection cycle. A negative value means that "               \
+          "CMSTriggerPermRatio is used")                                    \
                                                                             \
   product(bool, UseCMSInitiatingOccupancyOnly, false,                       \
           "Only use occupancy as a crierion for starting a CMS collection") \
                                                                             \
   product(intx, CMSIsTooFullPercentage, 98,                                 \
-          "An absolute ceiling above which CMS will always consider the"    \
-          " perm gen ripe for collection")                                  \
+          "An absolute ceiling above which CMS will always consider the "   \
+          "perm gen ripe for collection")                                   \
                                                                             \
   develop(bool, CMSTestInFreeList, false,                                   \
           "Check if the coalesced range is already in the "                 \
-          "free lists as claimed.")                                         \
+          "free lists as claimed")                                          \
                                                                             \
   notproduct(bool, CMSVerifyReturnedBytes, false,                           \
           "Check that all the garbage collected was returned to the "       \
@@ -1663,8 +1756,12 @@
           "Enforce ScavengeALot/GCALot at all potential safepoints")        \
                                                                             \
   product(bool, HandlePromotionFailure, true,                               \
-          "The youngest generation collection does not require"             \
-          " a guarantee of full promotion of all live objects.")            \
+          "The youngest generation collection does not require "            \
+          "a guarantee of full promotion of all live objects.")             \
+                                                                            \
+  product(bool, PrintPromotionFailure, false,                               \
+          "Print additional diagnostic information following "              \
+          " promotion failure")                                             \
                                                                             \
   notproduct(bool, PromotionFailureALot, false,                             \
           "Use promotion failure handling on every youngest generation "    \
@@ -1692,7 +1789,7 @@
           "Ratio of hard spins to calls to yield")                          \
                                                                             \
   product(uintx, PreserveMarkStackSize, 1024,                               \
-           "Size for stack used in promotion failure handling")             \
+          "Size for stack used in promotion failure handling")              \
                                                                             \
   product_pd(bool, UseTLAB, "Use thread-local object allocation")           \
                                                                             \
@@ -1720,14 +1817,27 @@
   product(bool, AlwaysActAsServerClassMachine, false,                       \
           "Always act like a server-class machine")                         \
                                                                             \
-  product_pd(uintx, DefaultMaxRAM,                                          \
-          "Maximum real memory size for setting server class heap size")    \
+  product_pd(uint64_t, MaxRAM,                                              \
+          "Real memory size (in bytes) used to set maximum heap size")      \
+                                                                            \
+  product(uintx, ErgoHeapSizeLimit, 0,                                      \
+          "Maximum ergonomically set heap size (in bytes); zero means use " \
+          "MaxRAM / MaxRAMFraction")                                        \
+                                                                            \
+  product(uintx, MaxRAMFraction, 4,                                         \
+          "Maximum fraction (1/n) of real memory used for maximum heap "    \
+          "size")                                                           \
                                                                             \
   product(uintx, DefaultMaxRAMFraction, 4,                                  \
-          "Fraction (1/n) of real memory used for server class max heap")   \
-                                                                            \
-  product(uintx, DefaultInitialRAMFraction, 64,                             \
-          "Fraction (1/n) of real memory used for server class initial heap")  \
+          "Maximum fraction (1/n) of real memory used for maximum heap "    \
+          "size; deprecated: to be renamed to MaxRAMFraction")              \
+                                                                            \
+  product(uintx, MinRAMFraction, 2,                                         \
+          "Minimum fraction (1/n) of real memory used for maxmimum heap "   \
+          "size on systems with small physical memory size")                \
+                                                                            \
+  product(uintx, InitialRAMFraction, 64,                                    \
+          "Fraction (1/n) of real memory used for initial heap size")       \
                                                                             \
   product(bool, UseAutoGCSelectPolicy, false,                               \
           "Use automatic collection selection policy")                      \
@@ -1778,7 +1888,7 @@
           "Number of collections before the adaptive sizing is started")    \
                                                                             \
   product(uintx, AdaptiveSizePolicyOutputInterval, 0,                       \
-          "Collecton interval for printing information, zero => never")     \
+          "Collecton interval for printing information; zero => never")     \
                                                                             \
   product(bool, UseAdaptiveSizePolicyFootprintGoal, true,                   \
           "Use adaptive minimum footprint as a goal")                       \
@@ -1808,7 +1918,8 @@
           "Allowed collection cost difference between generations")         \
                                                                             \
   product(uintx, AdaptiveSizePolicyCollectionCostMargin, 50,                \
-          "If collection costs are within margin, reduce both by full delta") \
+          "If collection costs are within margin, reduce both by full "     \
+          "delta")                                                          \
                                                                             \
   product(uintx, YoungGenerationSizeIncrement, 20,                          \
           "Adaptive size percentage change in young generation")            \
@@ -1904,6 +2015,10 @@
   diagnostic(bool, GCParallelVerificationEnabled, true,                     \
           "Enable parallel memory system verification")                     \
                                                                             \
+  diagnostic(bool, DeferInitialCardMark, false,                             \
+          "When +ReduceInitialCardMarks, explicitly defer any that "        \
+           "may arise from new_pre_store_barrier")                          \
+                                                                            \
   diagnostic(bool, VerifyRememberedSets, false,                             \
           "Verify GC remembered sets")                                      \
                                                                             \
@@ -1942,9 +2057,6 @@
           "number of times a GC thread (minus the coordinator) "            \
           "will sleep while yielding before giving up and resuming GC")     \
                                                                             \
-  notproduct(bool, PrintFlagsFinal, false,                                  \
-          "Print all command line flags after argument processing")         \
-                                                                            \
   /* gc tracing */                                                          \
   manageable(bool, PrintGC, false,                                          \
           "Print message at garbage collect")                               \
@@ -2244,11 +2356,20 @@
          "If false, restricts profiled locations to the root method only")  \
                                                                             \
   product(bool, PrintVMOptions, trueInDebug,                                \
-         "print VM flag settings")                                          \
+         "Print flags that appeared on the command line")                   \
                                                                             \
   product(bool, IgnoreUnrecognizedVMOptions, false,                         \
          "Ignore unrecognized VM options")                                  \
                                                                             \
+  product(bool, PrintCommandLineFlags, false,                               \
+         "Print flags specified on command line or set by ergonomics")      \
+                                                                            \
+  product(bool, PrintFlagsInitial, false,                                   \
+         "Print all VM flags before argument processing and exit VM")       \
+                                                                            \
+  product(bool, PrintFlagsFinal, false,                                     \
+         "Print all VM flags after argument and ergonomic processing")      \
+                                                                            \
   diagnostic(bool, SerializeVMOutput, true,                                 \
          "Use a mutex to serialize output to tty and hotspot.log")          \
                                                                             \
@@ -2329,6 +2450,9 @@
   notproduct(bool, CompileTheWorldIgnoreInitErrors, false,                  \
           "Compile all methods although class initializer failed")          \
                                                                             \
+  notproduct(intx, CompileTheWorldSafepointInterval, 100,                   \
+          "Force a safepoint every n compiles so sweeper can keep up")      \
+                                                                            \
   develop(bool, TraceIterativeGVN, false,                                   \
           "Print progress during Iterative Global Value Numbering")         \
                                                                             \
@@ -2527,8 +2651,11 @@
                                                                             \
   develop(bool, VerifyCompiledCode, false,                                  \
           "Include miscellaneous runtime verifications in nmethod code; "   \
-          "off by default because it disturbs nmethod size heuristics.")    \
-                                                                            \
+          "default off because it disturbs nmethod size heuristics")        \
+                                                                            \
+  notproduct(bool, CrashGCForDumpingJavaThread, false,                      \
+          "Manually make GC thread crash then dump java stack trace;  "     \
+          "Test only")                                                      \
                                                                             \
   /* compilation */                                                         \
   product(bool, UseCompiler, true,                                          \
@@ -2644,10 +2771,10 @@
   notproduct(intx, MaxSubklassPrintSize, 4,                                 \
           "maximum number of subklasses to print when printing klass")      \
                                                                             \
-  develop(intx, MaxInlineLevel, 9,                                          \
+  product(intx, MaxInlineLevel, 9,                                          \
           "maximum number of nested calls that are inlined")                \
                                                                             \
-  develop(intx, MaxRecursiveInlineLevel, 1,                                 \
+  product(intx, MaxRecursiveInlineLevel, 1,                                 \
           "maximum number of nested recursive calls that are inlined")      \
                                                                             \
   product_pd(intx, InlineSmallCode,                                         \
@@ -2660,10 +2787,10 @@
   product_pd(intx, FreqInlineSize,                                          \
           "maximum bytecode size of a frequent method to be inlined")       \
                                                                             \
-  develop(intx, MaxTrivialSize, 6,                                          \
+  product(intx, MaxTrivialSize, 6,                                          \
           "maximum bytecode size of a trivial method to be inlined")        \
                                                                             \
-  develop(intx, MinInliningThreshold, 250,                                  \
+  product(intx, MinInliningThreshold, 250,                                  \
           "min. invocation count a method needs to have to be inlined")     \
                                                                             \
   develop(intx, AlignEntryCode, 4,                                          \
@@ -2740,7 +2867,7 @@
   product(intx, PerMethodRecompilationCutoff, 400,                          \
           "After recompiling N times, stay in the interpreter (-1=>'Inf')") \
                                                                             \
-  product(intx, PerBytecodeRecompilationCutoff, 100,                        \
+  product(intx, PerBytecodeRecompilationCutoff, 200,                        \
           "Per-BCI limit on repeated recompilation (-1=>'Inf')")            \
                                                                             \
   product(intx, PerMethodTrapLimit,  100,                                   \
@@ -2789,20 +2916,28 @@
           "an OS lock")                                                     \
                                                                             \
   /* gc parameters */                                                       \
-  product(uintx, MaxHeapSize, ScaleForWordSize(64*M),                       \
-          "Default maximum size for object heap (in bytes)")                \
-                                                                            \
-  product_pd(uintx, NewSize,                                                \
-          "Default size of new generation (in bytes)")                      \
+  product(uintx, InitialHeapSize, 0,                                        \
+          "Initial heap size (in bytes); zero means OldSize + NewSize")     \
+                                                                            \
+  product(uintx, MaxHeapSize, ScaleForWordSize(96*M),                       \
+          "Maximum heap size (in bytes)")                                   \
+                                                                            \
+  product(uintx, OldSize, ScaleForWordSize(4*M),                            \
+          "Initial tenured generation size (in bytes)")                     \
+                                                                            \
+  product(uintx, NewSize, ScaleForWordSize(4*M),                            \
+          "Initial new generation size (in bytes)")                         \
                                                                             \
   product(uintx, MaxNewSize, max_uintx,                                     \
-          "Maximum size of new generation (in bytes)")                      \
+          "Maximum new generation size (in bytes), max_uintx means set "    \
+          "ergonomically")                                                  \
                                                                             \
   product(uintx, PretenureSizeThreshold, 0,                                 \
-          "Max size in bytes of objects allocated in DefNew generation")    \
-                                                                            \
-  product_pd(uintx, TLABSize,                                               \
-          "Default (or starting) size of TLAB (in bytes)")                  \
+          "Maximum size in bytes of objects allocated in DefNew "           \
+          "generation; zero means no maximum")                              \
+                                                                            \
+  product(uintx, TLABSize, 0,                                               \
+          "Starting TLAB size (in bytes); zero means set ergonomically")    \
                                                                             \
   product(uintx, MinTLABSize, 2*K,                                          \
           "Minimum allowed TLAB size (in bytes)")                           \
@@ -2819,10 +2954,10 @@
   product(uintx, TLABWasteIncrement,    4,                                  \
           "Increment allowed waste at slow allocation")                     \
                                                                             \
-  product_pd(intx, SurvivorRatio,                                           \
+  product(intx, SurvivorRatio, 8,                                           \
           "Ratio of eden/survivor space size")                              \
                                                                             \
-  product_pd(intx, NewRatio,                                                \
+  product(intx, NewRatio, 2,                                                \
           "Ratio of new/old generation sizes")                              \
                                                                             \
   product(uintx, MaxLiveObjectEvacuationRatio, 100,                         \
@@ -2832,11 +2967,8 @@
           "Additional size added to desired new generation size per "       \
           "non-daemon thread (in bytes)")                                   \
                                                                             \
-  product(uintx, OldSize, ScaleForWordSize(4096*K),                         \
-          "Default size of tenured generation (in bytes)")                  \
-                                                                            \
   product_pd(uintx, PermSize,                                               \
-          "Default size of permanent generation (in bytes)")                \
+          "Initial size of permanent generation (in bytes)")                \
                                                                             \
   product_pd(uintx, MaxPermSize,                                            \
           "Maximum size of permanent generation (in bytes)")                \
@@ -2988,6 +3120,15 @@
   notproduct(bool, ExitOnFullCodeCache, false,                              \
           "Exit the VM if we fill the code cache.")                         \
                                                                             \
+  product(bool, UseCodeCacheFlushing, false,                                \
+          "Attempt to clean the code cache before shutting off compiler")   \
+                                                                            \
+  product(intx,  MinCodeCacheFlushingInterval, 30,                          \
+          "Min number of seconds between code cache cleaning sessions")     \
+                                                                            \
+  product(uintx,  CodeCacheFlushingMinimumFreeSpace, 1500*K,                \
+          "When less than X space left, start code cache cleaning")         \
+                                                                            \
   /* interpreter debugging */                                               \
   develop(intx, BinarySwitchThreshold, 5,                                   \
           "Minimal number of lookupswitch entries for rewriting to binary " \
@@ -3334,6 +3475,9 @@
   diagnostic(bool, OptimizeMethodHandles, true,                             \
           "when constructing method handles, try to improve them")          \
                                                                             \
+  experimental(bool, TrustFinalNonStaticFields, false,                      \
+          "trust final non-static declarations for constant folding")       \
+                                                                            \
   experimental(bool, EnableInvokeDynamic, false,                            \
           "recognize the invokedynamic instruction")                        \
                                                                             \
--- a/src/share/vm/runtime/globals_extension.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/globals_extension.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -64,6 +64,7 @@
 #define C2_PRODUCT_FLAG_MEMBER(type, name, value, doc)         FLAG_MEMBER(name),
 #define C2_PD_PRODUCT_FLAG_MEMBER(type, name, doc)             FLAG_MEMBER(name),
 #define C2_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc)      FLAG_MEMBER(name),
+#define C2_EXPERIMENTAL_FLAG_MEMBER(type, name, value, doc)    FLAG_MEMBER(name),
 #ifdef PRODUCT
   #define C2_DEVELOP_FLAG_MEMBER(type, name, value, doc)       /* flag is constant */
   #define C2_PD_DEVELOP_FLAG_MEMBER(type, name, doc)           /* flag is constant */
@@ -84,7 +85,7 @@
  C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, C1_PD_DEVELOP_FLAG_MEMBER, C1_PRODUCT_FLAG_MEMBER, C1_PD_PRODUCT_FLAG_MEMBER, C1_NOTPRODUCT_FLAG_MEMBER)
 #endif
 #ifdef COMPILER2
- C2_FLAGS(C2_DEVELOP_FLAG_MEMBER, C2_PD_DEVELOP_FLAG_MEMBER, C2_PRODUCT_FLAG_MEMBER, C2_PD_PRODUCT_FLAG_MEMBER, C2_DIAGNOSTIC_FLAG_MEMBER, C2_NOTPRODUCT_FLAG_MEMBER)
+ C2_FLAGS(C2_DEVELOP_FLAG_MEMBER, C2_PD_DEVELOP_FLAG_MEMBER, C2_PRODUCT_FLAG_MEMBER, C2_PD_PRODUCT_FLAG_MEMBER, C2_DIAGNOSTIC_FLAG_MEMBER, C2_EXPERIMENTAL_FLAG_MEMBER, C2_NOTPRODUCT_FLAG_MEMBER)
 #endif
  NUM_CommandLineFlag
 } CommandLineFlag;
@@ -130,6 +131,7 @@
 #define C2_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)         FLAG_MEMBER_WITH_TYPE(name,type),
 #define C2_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc)             FLAG_MEMBER_WITH_TYPE(name,type),
 #define C2_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)      FLAG_MEMBER_WITH_TYPE(name,type),
+#define C2_EXPERIMENTAL_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)      FLAG_MEMBER_WITH_TYPE(name,type),
 #ifdef PRODUCT
   #define C2_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)       /* flag is constant */
   #define C2_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)           /* flag is constant */
@@ -181,6 +183,7 @@
           C2_PRODUCT_FLAG_MEMBER_WITH_TYPE,
           C2_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE,
           C2_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE,
+          C2_EXPERIMENTAL_FLAG_MEMBER_WITH_TYPE,
           C2_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE)
 #endif
  NUM_CommandLineFlagWithType
@@ -202,6 +205,7 @@
   static void boolAtPut(CommandLineFlagWithType flag, bool value, FlagValueOrigin origin);
   static void intxAtPut(CommandLineFlagWithType flag, intx value, FlagValueOrigin origin);
   static void uintxAtPut(CommandLineFlagWithType flag, uintx value, FlagValueOrigin origin);
+  static void uint64_tAtPut(CommandLineFlagWithType flag, uint64_t value, FlagValueOrigin origin);
   static void doubleAtPut(CommandLineFlagWithType flag, double value, FlagValueOrigin origin);
   static void ccstrAtPut(CommandLineFlagWithType flag, ccstr value, FlagValueOrigin origin);
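
For context (illustrative sketch, not part of the changeset): each *_FLAG_MEMBER macro contributes one enumerator per flag row. Assuming the FLAG_MEMBER definition used elsewhere in this header (#define FLAG_MEMBER(flag) Flag_##flag), the new experimental macro expands, for example, as:

    // C2_EXPERIMENTAL_FLAG_MEMBER(bool, TrustFinalNonStaticFields, false, doc)
    //   ==> Flag_TrustFinalNonStaticFields,
    // which is why the C2_FLAGS invocations above must now be passed
    // C2_EXPERIMENTAL_FLAG_MEMBER (and its _WITH_TYPE variant); otherwise the
    // new experimental() rows in globals.hpp would have no enum entries.
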
 
--- a/src/share/vm/runtime/jniHandles.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/jniHandles.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -144,7 +144,7 @@
   EXCEPTION_MARK;
   // We will never reach the CATCH below since Exceptions::_throw will cause
   // the VM to exit if an exception is thrown during initialization
-  klassOop k      = SystemDictionary::object_klass();
+  klassOop k      = SystemDictionary::Object_klass();
   _deleted_handle = instanceKlass::cast(k)->allocate_permanent_instance(CATCH);
 }
 
--- a/src/share/vm/runtime/os.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/os.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -280,7 +280,7 @@
                            string,
                            CHECK);
 
-    KlassHandle group(THREAD, SystemDictionary::threadGroup_klass());
+    KlassHandle group(THREAD, SystemDictionary::ThreadGroup_klass());
     JavaCalls::call_special(&result,
                             thread_group,
                             group,
--- a/src/share/vm/runtime/os.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/os.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -60,24 +60,26 @@
 typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);
 
 class os: AllStatic {
- private:
+ public:
   enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel)
 
+ private:
   static OSThread*          _starting_thread;
   static address            _polling_page;
   static volatile int32_t * _mem_serialize_page;
   static uintptr_t          _serialize_page_mask;
+ public:
   static size_t             _page_sizes[page_sizes_max];
 
+ private:
   static void init_page_sizes(size_t default_page_size) {
     _page_sizes[0] = default_page_size;
     _page_sizes[1] = 0; // sentinel
   }
 
  public:
-
-  static void init(void);                       // Called before command line parsing
-  static jint init_2(void);                    // Called after command line parsing
+  static void init(void);   // Called before command line parsing
+  static jint init_2(void); // Called after command line parsing
 
   // File names are case-insensitive on windows only
   // Override me as needed
@@ -141,6 +143,7 @@
   static int processor_count() {
     return _processor_count;
   }
+  static void set_processor_count(int count) { _processor_count = count; }
 
   // Returns the number of CPUs this process is currently allowed to run on.
   // Note that on some OSes this can change dynamically.
@@ -294,19 +297,16 @@
   }
 
   static bool    is_memory_serialize_page(JavaThread *thread, address addr) {
-    address thr_addr;
     if (UseMembar) return false;
-    // Calculate thread specific address
+    // Previously this function calculated the exact address of this
+    // thread's serialize page, and checked if the faulting address
+    // was equal.  However, some platforms mask off faulting addresses
+    // to the page size, so now we just check that the address is
+    // within the page.  This makes the thread argument unnecessary,
+    // but we retain the NULL check to preserve existing behaviour.
     if (thread == NULL) return false;
-    // TODO-FIXME: some platforms mask off faulting addresses to the base pagesize.
-    // Instead of using a test for equality we should probably use something
-    // of the form:
-    // return ((_mem_serialize_page ^ addr) & -pagesize) == 0
-    //
-    thr_addr  = (address)(((uintptr_t)thread >>
-                get_serialize_page_shift_count()) &
-                get_serialize_page_mask()) + (uintptr_t)_mem_serialize_page;
-    return  (thr_addr == addr);
+    address page = (address) _mem_serialize_page;
+    return addr >= page && addr < (page + os::vm_page_size());
   }
 
   static void block_on_serialize_page_trap();
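
A standalone rendering of the simplified check may help (illustrative only; 'page' stands in for _mem_serialize_page and 'page_size' for os::vm_page_size()):

    #include <cstddef>
    #include <cstdint>

    // Any faulting address that falls anywhere on the serialize page now
    // matches, which tolerates platforms that mask fault addresses down to a
    // page boundary; the old code required exact equality with a per-thread
    // address computed from the thread pointer.
    static bool is_on_serialize_page(uintptr_t addr, uintptr_t page, size_t page_size) {
      return addr >= page && addr < page + page_size;
    }
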
--- a/src/share/vm/runtime/reflection.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/reflection.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -449,7 +449,7 @@
   // sun/reflect/MagicAccessorImpl subclasses to succeed trivially.
   if (   JDK_Version::is_gte_jdk14x_version()
       && UseNewReflection
-      && Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_magic_klass())) {
+      && Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass())) {
     return true;
   }
 
@@ -482,6 +482,11 @@
       under_host_klass(accessee_ik, accessor))
     return true;
 
+  // Adapter frames can access anything.
+  if (MethodHandleCompiler::klass_is_method_handle_adapter_holder(accessor))
+    // This is an internal adapter frame from the MethodHandleCompiler.
+    return true;
+
   if (RelaxAccessControlCheck ||
       (accessor_ik->major_version() < JAVA_1_5_VERSION &&
        accessee_ik->major_version() < JAVA_1_5_VERSION)) {
@@ -541,7 +546,7 @@
   // sun/reflect/MagicAccessorImpl subclasses to succeed trivially.
   if (   JDK_Version::is_gte_jdk14x_version()
       && UseNewReflection
-      && Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_magic_klass())) {
+      && Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass())) {
     return true;
   }
 
@@ -631,7 +636,7 @@
 
 objArrayHandle Reflection::get_parameter_types(methodHandle method, int parameter_count, oop* return_type, TRAPS) {
   // Allocate array holding parameter types (java.lang.Class instances)
-  objArrayOop m = oopFactory::new_objArray(SystemDictionary::class_klass(), parameter_count, CHECK_(objArrayHandle()));
+  objArrayOop m = oopFactory::new_objArray(SystemDictionary::Class_klass(), parameter_count, CHECK_(objArrayHandle()));
   objArrayHandle mirrors (THREAD, m);
   int index = 0;
   // Collect parameter types
@@ -1308,7 +1313,7 @@
   if (Klass::cast(klass)->oop_is_array() && which == MEMBER_DECLARED)  return NULL;
 
   if (Klass::cast(java_lang_Class::as_klassOop(mirror))->oop_is_array()) {
-    klass = SystemDictionary::object_klass();
+    klass = SystemDictionary::Object_klass();
   }
   instanceKlassHandle h_k(THREAD, klass);
 
@@ -1375,13 +1380,13 @@
   // Exclude primitive types
   if (java_lang_Class::is_primitive(mirror) ||
      (Klass::cast(java_lang_Class::as_klassOop(mirror))->oop_is_array() && (which == MEMBER_DECLARED))) {
-    klassOop klass = SystemDictionary::reflect_method_klass();
+    klassOop klass = SystemDictionary::reflect_Method_klass();
     return oopFactory::new_objArray(klass, 0, CHECK_NULL);  // Return empty array
   }
 
   klassOop klass = java_lang_Class::as_klassOop(mirror);
   if (Klass::cast(java_lang_Class::as_klassOop(mirror))->oop_is_array()) {
-    klass = SystemDictionary::object_klass();
+    klass = SystemDictionary::Object_klass();
   }
   instanceKlassHandle h_k(THREAD, klass);
 
@@ -1411,7 +1416,7 @@
         }
 
         // Allocate result
-        klassOop klass = SystemDictionary::reflect_method_klass();
+        klassOop klass = SystemDictionary::reflect_Method_klass();
         objArrayOop r = oopFactory::new_objArray(klass, count, CHECK_NULL);
         objArrayHandle h_result (THREAD, r);
 
@@ -1462,7 +1467,7 @@
           }
         }
         // Allocate result
-        klassOop klass = SystemDictionary::reflect_method_klass();
+        klassOop klass = SystemDictionary::reflect_Method_klass();
         objArrayOop r = oopFactory::new_objArray(klass, count, CHECK_NULL);
         objArrayHandle h_result (THREAD, r);
 
@@ -1523,7 +1528,7 @@
   bool prim  = java_lang_Class::is_primitive(mirror);
   Klass* k = prim ? NULL : Klass::cast(java_lang_Class::as_klassOop(mirror));
   if (prim || k->is_interface() || k->oop_is_array()) {
-    return oopFactory::new_objArray(SystemDictionary::reflect_constructor_klass(), 0, CHECK_NULL);  // Return empty array
+    return oopFactory::new_objArray(SystemDictionary::reflect_Constructor_klass(), 0, CHECK_NULL);  // Return empty array
   }
 
   // Must be instanceKlass at this point
--- a/src/share/vm/runtime/reflectionUtils.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/reflectionUtils.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -63,15 +63,15 @@
 void FilteredFieldsMap::initialize() {
   int offset;
   offset = java_lang_Throwable::get_backtrace_offset();
-  _filtered_fields->append(new FilteredField(SystemDictionary::throwable_klass(), offset));
+  _filtered_fields->append(new FilteredField(SystemDictionary::Throwable_klass(), offset));
   // The latest version of vm may be used with old jdk.
   if (JDK_Version::is_gte_jdk16x_version()) {
     // The following class fields do not exist in
     // previous version of jdk.
     offset = sun_reflect_ConstantPool::cp_oop_offset();
-    _filtered_fields->append(new FilteredField(SystemDictionary::reflect_constant_pool_klass(), offset));
+    _filtered_fields->append(new FilteredField(SystemDictionary::reflect_ConstantPool_klass(), offset));
     offset = sun_reflect_UnsafeStaticFieldAccessorImpl::base_offset();
-    _filtered_fields->append(new FilteredField(SystemDictionary::reflect_unsafe_static_field_accessor_impl_klass(), offset));
+    _filtered_fields->append(new FilteredField(SystemDictionary::reflect_UnsafeStaticFieldAccessorImpl_klass(), offset));
   }
 }
 
--- a/src/share/vm/runtime/sharedRuntime.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -364,7 +364,7 @@
 
 
 void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, Handle h_exception) {
-  if (JvmtiExport::can_post_exceptions()) {
+  if (JvmtiExport::can_post_on_exceptions()) {
     vframeStream vfst(thread, true);
     methodHandle method = methodHandle(thread, vfst.method());
     address bcp = method()->bcp_from(vfst.bci());
@@ -607,7 +607,9 @@
           _implicit_null_throws++;
 #endif
           target_pc = nm->continuation_for_implicit_exception(pc);
-          guarantee(target_pc != 0, "must have a continuation point");
+          // If there's an unexpected fault, target_pc might be NULL,
+          // in which case we want to fall through into the normal
+          // error handling code.
         }
 
         break; // fall through
@@ -621,14 +623,15 @@
         _implicit_div0_throws++;
 #endif
         target_pc = nm->continuation_for_implicit_exception(pc);
-        guarantee(target_pc != 0, "must have a continuation point");
+        // If there's an unexpected fault, target_pc might be NULL,
+        // in which case we want to fall through into the normal
+        // error handling code.
         break; // fall through
       }
 
       default: ShouldNotReachHere();
     }
 
-    guarantee(target_pc != NULL, "must have computed destination PC for implicit exception");
     assert(exception_kind == IMPLICIT_NULL || exception_kind == IMPLICIT_DIVIDE_BY_ZERO, "wrong implicit exception kind");
 
     // for AbortVMOnException flag
@@ -802,7 +805,7 @@
 
 #ifdef ASSERT
   // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
-  if (bc != Bytecodes::_invokestatic) {
+  if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic) {
     assert(receiver.not_null(), "should have thrown exception");
     KlassHandle receiver_klass (THREAD, receiver->klass());
     klassOop rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle));
@@ -860,7 +863,7 @@
   if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
     int retry_count = 0;
     while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
-           callee_method->method_holder() != SystemDictionary::object_klass()) {
+           callee_method->method_holder() != SystemDictionary::Object_klass()) {
       // If has a pending exception then there is no need to re-try to
       // resolve this method.
       // If the method has been redefined, we need to try again.
@@ -1027,7 +1030,26 @@
   frame stub_frame = thread->last_frame();
   assert(stub_frame.is_runtime_frame(), "sanity check");
   frame caller_frame = stub_frame.sender(&reg_map);
-  if (caller_frame.is_interpreted_frame() || caller_frame.is_entry_frame() ) {
+
+  // MethodHandle invokes don't have a CompiledIC and should always
+  // simply redispatch to the callee_target.
+  address   sender_pc = caller_frame.pc();
+  CodeBlob* sender_cb = caller_frame.cb();
+  nmethod*  sender_nm = sender_cb->as_nmethod_or_null();
+  bool is_mh_invoke_via_adapter = false;  // Direct c2c call, or via adapter?
+  if (sender_nm != NULL && sender_nm->is_method_handle_return(sender_pc)) {
+    // If the callee_target is set, then we have come here via an i2c
+    // adapter.
+    methodOop callee = thread->callee_target();
+    if (callee != NULL) {
+      assert(callee->is_method(), "sanity");
+      is_mh_invoke_via_adapter = true;
+    }
+  }
+
+  if (caller_frame.is_interpreted_frame() ||
+      caller_frame.is_entry_frame()       ||
+      is_mh_invoke_via_adapter) {
     methodOop callee = thread->callee_target();
     guarantee(callee != NULL && callee->is_method(), "bad handshake");
     thread->set_vm_result(callee);
@@ -1342,7 +1364,7 @@
 // We are calling the interpreter via a c2i. Normally this would mean that
 // we were called by a compiled method. However we could have lost a race
 // where we went int -> i2c -> c2i and so the caller could in fact be
-// interpreted. If the caller is compiled we attampt to patch the caller
+// interpreted. If the caller is compiled we attempt to patch the caller
 // so he no longer calls into the interpreter.
 IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(methodOopDesc* method, address caller_pc))
   methodOop moop(method);
@@ -1358,10 +1380,19 @@
   // we did we'd leap into space because the callsite needs to use
   // "to interpreter" stub in order to load up the methodOop. Don't
   // ask me how I know this...
-  //
 
   CodeBlob* cb = CodeCache::find_blob(caller_pc);
-  if ( !cb->is_nmethod() || entry_point == moop->get_c2i_entry()) {
+  if (!cb->is_nmethod() || entry_point == moop->get_c2i_entry()) {
+    return;
+  }
+
+  // The check above makes sure this is a nmethod.
+  nmethod* nm = cb->as_nmethod_or_null();
+  assert(nm, "must be");
+
+  // Don't fixup MethodHandle call sites as c2i/i2c adapters are used
+  // to implement MethodHandle actions.
+  if (nm->is_method_handle_return(caller_pc)) {
     return;
   }
 
@@ -1376,7 +1407,7 @@
 
   if (moop->code() == NULL) return;
 
-  if (((nmethod*)cb)->is_in_use()) {
+  if (nm->is_in_use()) {
 
     // Expect to find a native call there (unless it was no-inline cache vtable dispatch)
     MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
@@ -1408,7 +1439,7 @@
         if (callee == cb || callee->is_adapter_blob()) {
           // static call or optimized virtual
           if (TraceCallFixup) {
-            tty->print("fixup callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
+            tty->print("fixup callsite           at " INTPTR_FORMAT " to compiled code for", caller_pc);
             moop->print_short_name(tty);
             tty->print_cr(" to " INTPTR_FORMAT, entry_point);
           }
@@ -1424,7 +1455,7 @@
         }
       } else {
           if (TraceCallFixup) {
-            tty->print("already patched  callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
+            tty->print("already patched callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
             moop->print_short_name(tty);
             tty->print_cr(" to " INTPTR_FORMAT, entry_point);
           }
@@ -1529,7 +1560,7 @@
 oop SharedRuntime::wrong_method_type_is_for_single_argument(JavaThread* thr,
                                                             oopDesc* required) {
   if (required == NULL)  return NULL;
-  if (required->klass() == SystemDictionary::class_klass())
+  if (required->klass() == SystemDictionary::Class_klass())
     return required;
   if (required->is_klass())
     return Klass::cast(klassOop(required))->java_mirror();
@@ -1671,6 +1702,8 @@
   if( _find_handler_ctr ) tty->print_cr("%5d find exception handler", _find_handler_ctr );
   if( _rethrow_ctr ) tty->print_cr("%5d rethrow handler", _rethrow_ctr );
 
+  AdapterHandlerLibrary::print_statistics();
+
   if (xtty != NULL)  xtty->tail("statistics");
 }
 
@@ -1771,11 +1804,282 @@
 #endif
 
 
+// A simple wrapper class around the calling convention information
+// that allows sharing of adapters for the same calling convention.
+class AdapterFingerPrint : public CHeapObj {
+ private:
+  union {
+    int  _compact[3];
+    int* _fingerprint;
+  } _value;
+  int _length; // A negative length indicates the fingerprint is in the compact form;
+               // otherwise _value._fingerprint is the array.
+
+  // Remap BasicTypes that are handled equivalently by the adapters.
+  // These are correct for the current system but someday it might be
+  // necessary to make this mapping platform dependent.
+  static BasicType adapter_encoding(BasicType in) {
+    assert((~0xf & in) == 0, "must fit in 4 bits");
+    switch(in) {
+      case T_BOOLEAN:
+      case T_BYTE:
+      case T_SHORT:
+      case T_CHAR:
+        // These are all promoted to T_INT in the calling convention
+        return T_INT;
+
+      case T_OBJECT:
+      case T_ARRAY:
+        if (!TaggedStackInterpreter) {
+#ifdef _LP64
+          return T_LONG;
+#else
+          return T_INT;
+#endif
+        }
+        return T_OBJECT;
+
+      case T_INT:
+      case T_LONG:
+      case T_FLOAT:
+      case T_DOUBLE:
+      case T_VOID:
+        return in;
+
+      default:
+        ShouldNotReachHere();
+        return T_CONFLICT;
+    }
+  }
+
+ public:
+  AdapterFingerPrint(int total_args_passed, BasicType* sig_bt) {
+    // The fingerprint is based on the BasicType signature encoded
+    // into an array of ints with four entries per int.
+    int* ptr;
+    int len = (total_args_passed + 3) >> 2;
+    if (len <= (int)(sizeof(_value._compact) / sizeof(int))) {
+      _value._compact[0] = _value._compact[1] = _value._compact[2] = 0;
+      // Storing the signature encoded as signed chars hits about 98%
+      // of the time.
+      _length = -len;
+      ptr = _value._compact;
+    } else {
+      _length = len;
+      _value._fingerprint = NEW_C_HEAP_ARRAY(int, _length);
+      ptr = _value._fingerprint;
+    }
+
+    // Now pack the BasicTypes with 4 per int
+    int sig_index = 0;
+    for (int index = 0; index < len; index++) {
+      int value = 0;
+      for (int byte = 0; byte < 4; byte++) {
+        if (sig_index < total_args_passed) {
+          value = (value << 4) | adapter_encoding(sig_bt[sig_index++]);
+        }
+      }
+      ptr[index] = value;
+    }
+  }
+
+  ~AdapterFingerPrint() {
+    if (_length > 0) {
+      FREE_C_HEAP_ARRAY(int, _value._fingerprint);
+    }
+  }
+
+  int value(int index) {
+    if (_length < 0) {
+      return _value._compact[index];
+    }
+    return _value._fingerprint[index];
+  }
+  int length() {
+    if (_length < 0) return -_length;
+    return _length;
+  }
+
+  bool is_compact() {
+    return _length <= 0;
+  }
+
+  unsigned int compute_hash() {
+    int hash = 0;
+    for (int i = 0; i < length(); i++) {
+      int v = value(i);
+      hash = (hash << 8) ^ v ^ (hash >> 5);
+    }
+    return (unsigned int)hash;
+  }
+
+  const char* as_string() {
+    stringStream st;
+    for (int i = 0; i < length(); i++) {
+      st.print(PTR_FORMAT, value(i));
+    }
+    return st.as_string();
+  }
+
+  bool equals(AdapterFingerPrint* other) {
+    if (other->_length != _length) {
+      return false;
+    }
+    if (_length < 0) {
+      return _value._compact[0] == other->_value._compact[0] &&
+             _value._compact[1] == other->_value._compact[1] &&
+             _value._compact[2] == other->_value._compact[2];
+    } else {
+      for (int i = 0; i < _length; i++) {
+        if (_value._fingerprint[i] != other->_value._fingerprint[i]) {
+          return false;
+        }
+      }
+    }
+    return true;
+  }
+};
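
A worked packing example (illustrative, not in the changeset; assumes LP64, !TaggedStackInterpreter, and the usual HotSpot BasicType numbering T_INT == 10, T_LONG == 11, T_VOID == 14):

    // Static method (Ljava/lang/Object;IJ)V:
    //   sig_bt = { T_OBJECT, T_INT, T_LONG, T_VOID }   // the long takes two slots
    //   len    = (4 + 3) >> 2 = 1                      // compact form, _length == -1
    // adapter_encoding remaps T_OBJECT to T_LONG on 64-bit, so the one packed
    // word, built nibble by nibble, is
    //   ((((0xB << 4) | 0xA) << 4) | 0xB) << 4 | 0xE  ==  0xBABE
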
+
+
+// A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
+class AdapterHandlerTable : public BasicHashtable {
+  friend class AdapterHandlerTableIterator;
+
+ private:
+
+#ifndef PRODUCT
+  static int _lookups; // number of calls to lookup
+  static int _buckets; // number of buckets checked
+  static int _equals;  // number of buckets checked with matching hash
+  static int _hits;    // number of successful lookups
+  static int _compact; // number of equals calls with compact signature
+#endif
+
+  AdapterHandlerEntry* bucket(int i) {
+    return (AdapterHandlerEntry*)BasicHashtable::bucket(i);
+  }
+
+ public:
+  AdapterHandlerTable()
+    : BasicHashtable(293, sizeof(AdapterHandlerEntry)) { }
+
+  // Create a new entry suitable for insertion in the table
+  AdapterHandlerEntry* new_entry(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry, address c2i_unverified_entry) {
+    AdapterHandlerEntry* entry = (AdapterHandlerEntry*)BasicHashtable::new_entry(fingerprint->compute_hash());
+    entry->init(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
+    return entry;
+  }
+
+  // Insert an entry into the table
+  void add(AdapterHandlerEntry* entry) {
+    int index = hash_to_index(entry->hash());
+    add_entry(index, entry);
+  }
+
+  void free_entry(AdapterHandlerEntry* entry) {
+    entry->deallocate();
+    BasicHashtable::free_entry(entry);
+  }
+
+  // Find an entry with the same fingerprint, if one exists
+  AdapterHandlerEntry* lookup(int total_args_passed, BasicType* sig_bt) {
+    NOT_PRODUCT(_lookups++);
+    AdapterFingerPrint fp(total_args_passed, sig_bt);
+    unsigned int hash = fp.compute_hash();
+    int index = hash_to_index(hash);
+    for (AdapterHandlerEntry* e = bucket(index); e != NULL; e = e->next()) {
+      NOT_PRODUCT(_buckets++);
+      if (e->hash() == hash) {
+        NOT_PRODUCT(_equals++);
+        if (fp.equals(e->fingerprint())) {
+#ifndef PRODUCT
+          if (fp.is_compact()) _compact++;
+          _hits++;
+#endif
+          return e;
+        }
+      }
+    }
+    return NULL;
+  }
+
+#ifndef PRODUCT
+  void print_statistics() {
+    ResourceMark rm;
+    int longest = 0;
+    int empty = 0;
+    int total = 0;
+    int nonempty = 0;
+    for (int index = 0; index < table_size(); index++) {
+      int count = 0;
+      for (AdapterHandlerEntry* e = bucket(index); e != NULL; e = e->next()) {
+        count++;
+      }
+      if (count != 0) nonempty++;
+      if (count == 0) empty++;
+      if (count > longest) longest = count;
+      total += count;
+    }
+    tty->print_cr("AdapterHandlerTable: empty %d longest %d total %d average %f",
+                  empty, longest, total, total / (double)nonempty);
+    tty->print_cr("AdapterHandlerTable: lookups %d buckets %d equals %d hits %d compact %d",
+                  _lookups, _buckets, _equals, _hits, _compact);
+  }
+#endif
+};
+
+
+#ifndef PRODUCT
+
+int AdapterHandlerTable::_lookups;
+int AdapterHandlerTable::_buckets;
+int AdapterHandlerTable::_equals;
+int AdapterHandlerTable::_hits;
+int AdapterHandlerTable::_compact;
+
+class AdapterHandlerTableIterator : public StackObj {
+ private:
+  AdapterHandlerTable* _table;
+  int _index;
+  AdapterHandlerEntry* _current;
+
+  void scan() {
+    while (_index < _table->table_size()) {
+      AdapterHandlerEntry* a = _table->bucket(_index);
+      if (a != NULL) {
+        _current = a;
+        return;
+      }
+      _index++;
+    }
+  }
+
+ public:
+  AdapterHandlerTableIterator(AdapterHandlerTable* table): _table(table), _index(0), _current(NULL) {
+    scan();
+  }
+  bool has_next() {
+    return _current != NULL;
+  }
+  AdapterHandlerEntry* next() {
+    if (_current != NULL) {
+      AdapterHandlerEntry* result = _current;
+      _current = _current->next();
+      if (_current == NULL) scan();
+      return result;
+    } else {
+      return NULL;
+    }
+  }
+};
+#endif
+
+
 // ---------------------------------------------------------------------------
 // Implementation of AdapterHandlerLibrary
 const char* AdapterHandlerEntry::name = "I2C/C2I adapters";
-GrowableArray<uint64_t>* AdapterHandlerLibrary::_fingerprints = NULL;
-GrowableArray<AdapterHandlerEntry* >* AdapterHandlerLibrary::_handlers = NULL;
+AdapterHandlerTable* AdapterHandlerLibrary::_adapters = NULL;
+AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = NULL;
 const int AdapterHandlerLibrary_size = 16*K;
 BufferBlob* AdapterHandlerLibrary::_buffer = NULL;
 
@@ -1787,28 +2091,31 @@
 }
 
 void AdapterHandlerLibrary::initialize() {
-  if (_fingerprints != NULL) return;
-  _fingerprints = new(ResourceObj::C_HEAP)GrowableArray<uint64_t>(32, true);
-  _handlers = new(ResourceObj::C_HEAP)GrowableArray<AdapterHandlerEntry*>(32, true);
-  // Index 0 reserved for the slow path handler
-  _fingerprints->append(0/*the never-allowed 0 fingerprint*/);
-  _handlers->append(NULL);
+  if (_adapters != NULL) return;
+  _adapters = new AdapterHandlerTable();
 
   // Create a special handler for abstract methods.  Abstract methods
   // are never compiled so an i2c entry is somewhat meaningless, but
   // fill it in with something appropriate just in case.  Pass handle
   // wrong method for the c2i transitions.
   address wrong_method = SharedRuntime::get_handle_wrong_method_stub();
-  _fingerprints->append(0/*the never-allowed 0 fingerprint*/);
-  assert(_handlers->length() == AbstractMethodHandler, "in wrong slot");
-  _handlers->append(new AdapterHandlerEntry(StubRoutines::throw_AbstractMethodError_entry(),
-                                            wrong_method, wrong_method));
+  _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, NULL),
+                                                              StubRoutines::throw_AbstractMethodError_entry(),
+                                                              wrong_method, wrong_method);
 }
 
-int AdapterHandlerLibrary::get_create_adapter_index(methodHandle method) {
-  // Use customized signature handler.  Need to lock around updates to the
-  // _fingerprints array (it is not safe for concurrent readers and a single
-  // writer: this can be fixed if it becomes a problem).
+AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint,
+                                                      address i2c_entry,
+                                                      address c2i_entry,
+                                                      address c2i_unverified_entry) {
+  return _adapters->new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
+}
+
+AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(methodHandle method) {
+  // Use customized signature handler.  Need to lock around updates to
+  // the AdapterHandlerTable (it is not safe for concurrent readers
+  // and a single writer: this could be fixed if it becomes a
+  // problem).
 
   // Get the address of the ic_miss handlers before we grab the
   // AdapterHandlerLibrary_lock. This fixes bug 6236259 which
@@ -1819,47 +2126,58 @@
   address ic_miss = SharedRuntime::get_ic_miss_stub();
   assert(ic_miss != NULL, "must have handler");
 
-  int result;
+  ResourceMark rm;
+
   NOT_PRODUCT(int code_size);
   BufferBlob *B = NULL;
   AdapterHandlerEntry* entry = NULL;
-  uint64_t fingerprint;
+  AdapterFingerPrint* fingerprint = NULL;
   {
     MutexLocker mu(AdapterHandlerLibrary_lock);
     // make sure data structure is initialized
     initialize();
 
     if (method->is_abstract()) {
-      return AbstractMethodHandler;
+      return _abstract_method_handler;
     }
 
+    // Fill in the signature array, for the calling-convention call.
+    int total_args_passed = method->size_of_parameters(); // All args on stack
+
+    BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
+    VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
+    int i = 0;
+    if (!method->is_static())  // Pass in receiver first
+      sig_bt[i++] = T_OBJECT;
+    for (SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) {
+      sig_bt[i++] = ss.type();  // Collect remaining bits of signature
+      if (ss.type() == T_LONG || ss.type() == T_DOUBLE)
+        sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
+    }
+    assert(i == total_args_passed, "");
+
     // Lookup method signature's fingerprint
-    fingerprint = Fingerprinter(method).fingerprint();
-    assert( fingerprint != CONST64( 0), "no zero fingerprints allowed" );
-    // Fingerprints are small fixed-size condensed representations of
-    // signatures.  If the signature is too large, it won't fit in a
-    // fingerprint.  Signatures which cannot support a fingerprint get a new i2c
-    // adapter gen'd each time, instead of searching the cache for one.  This -1
-    // game can be avoided if I compared signatures instead of using
-    // fingerprints.  However, -1 fingerprints are very rare.
-    if( fingerprint != UCONST64(-1) ) { // If this is a cache-able fingerprint
-      // Turns out i2c adapters do not care what the return value is.  Mask it
-      // out so signatures that only differ in return type will share the same
-      // adapter.
-      fingerprint &= ~(SignatureIterator::result_feature_mask << SignatureIterator::static_feature_size);
-      // Search for a prior existing i2c/c2i adapter
-      int index = _fingerprints->find(fingerprint);
-      if( index >= 0 ) return index; // Found existing handlers?
-    } else {
-      // Annoyingly, I end up adding -1 fingerprints to the array of handlers,
-      // because I need a unique handler index.  It cannot be scanned for
-      // because all -1's look alike.  Instead, the matching index is passed out
-      // and immediately used to collect the 2 return values (the c2i and i2c
-      // adapters).
+    entry = _adapters->lookup(total_args_passed, sig_bt);
+
+#ifdef ASSERT
+    AdapterHandlerEntry* shared_entry = NULL;
+    if (VerifyAdapterSharing && entry != NULL) {
+      shared_entry = entry;
+      entry = NULL;
+    }
+#endif
+
+    if (entry != NULL) {
+      return entry;
     }
 
+    // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
+    int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false);
+
+    // Make a C heap allocated version of the fingerprint to store in the adapter
+    fingerprint = new AdapterFingerPrint(total_args_passed, sig_bt);
+
     // Create I2C & C2I handlers
-    ResourceMark rm;
 
     BufferBlob*  buf = buffer_blob(); // the temporary code buffer in CodeCache
     if (buf != NULL) {
@@ -1869,32 +2187,26 @@
                                              sizeof(buffer_locs)/sizeof(relocInfo));
       MacroAssembler _masm(&buffer);
 
-      // Fill in the signature array, for the calling-convention call.
-      int total_args_passed = method->size_of_parameters(); // All args on stack
-
-      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType,total_args_passed);
-      VMRegPair  * regs   = NEW_RESOURCE_ARRAY(VMRegPair  ,total_args_passed);
-      int i=0;
-      if( !method->is_static() )  // Pass in receiver first
-        sig_bt[i++] = T_OBJECT;
-      for( SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) {
-        sig_bt[i++] = ss.type();  // Collect remaining bits of signature
-        if( ss.type() == T_LONG || ss.type() == T_DOUBLE )
-          sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
-      }
-      assert( i==total_args_passed, "" );
-
-      // Now get the re-packed compiled-Java layout.
-      int comp_args_on_stack;
-
-      // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
-      comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false);
-
       entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
                                                      total_args_passed,
                                                      comp_args_on_stack,
                                                      sig_bt,
-                                                     regs);
+                                                     regs,
+                                                     fingerprint);
+
+#ifdef ASSERT
+      if (VerifyAdapterSharing) {
+        if (shared_entry != NULL) {
+          assert(shared_entry->compare_code(buf->instructions_begin(), buffer.code_size(), total_args_passed, sig_bt),
+                 "code must match");
+          // Release the one just created and return the original
+          _adapters->free_entry(entry);
+          return shared_entry;
+        } else {
+          entry->save_code(buf->instructions_begin(), buffer.code_size(), total_args_passed, sig_bt);
+        }
+      }
+#endif
 
       B = BufferBlob::create(AdapterHandlerEntry::name, &buffer);
       NOT_PRODUCT(code_size = buffer.code_size());
@@ -1903,49 +2215,33 @@
       // CodeCache is full, disable compilation
       // Ought to log this but compile log is only per compile thread
       // and we're some non descript Java thread.
-      UseInterpreter = true;
-      if (UseCompiler || AlwaysCompileLoopMethods ) {
-#ifndef PRODUCT
-        warning("CodeCache is full. Compiler has been disabled");
-        if (CompileTheWorld || ExitOnFullCodeCache) {
-          before_exit(JavaThread::current());
-          exit_globals(); // will delete tty
-          vm_direct_exit(CompileTheWorld ? 0 : 1);
-        }
-#endif
-        UseCompiler               = false;
-        AlwaysCompileLoopMethods  = false;
-      }
-      return 0; // Out of CodeCache space (_handlers[0] == NULL)
+      MutexUnlocker mu(AdapterHandlerLibrary_lock);
+      CompileBroker::handle_full_code_cache();
+      return NULL; // Out of CodeCache space
     }
     entry->relocate(B->instructions_begin());
 #ifndef PRODUCT
     // debugging support
     if (PrintAdapterHandlers) {
       tty->cr();
-      tty->print_cr("i2c argument handler #%d for: %s %s (fingerprint = 0x%llx, %d bytes generated)",
-                    _handlers->length(), (method->is_static() ? "static" : "receiver"),
-                    method->signature()->as_C_string(), fingerprint, code_size );
+      tty->print_cr("i2c argument handler #%d for: %s %s (fingerprint = %s, %d bytes generated)",
+                    _adapters->number_of_entries(), (method->is_static() ? "static" : "receiver"),
+                    method->signature()->as_C_string(), fingerprint->as_string(), code_size );
       tty->print_cr("c2i argument handler starts at %p",entry->get_c2i_entry());
       Disassembler::decode(entry->get_i2c_entry(), entry->get_i2c_entry() + code_size);
     }
 #endif
 
-    // add handlers to library
-    _fingerprints->append(fingerprint);
-    _handlers->append(entry);
-    // set handler index
-    assert(_fingerprints->length() == _handlers->length(), "sanity check");
-    result = _fingerprints->length() - 1;
+    _adapters->add(entry);
   }
   // Outside of the lock
   if (B != NULL) {
     char blob_id[256];
     jio_snprintf(blob_id,
                  sizeof(blob_id),
-                 "%s(" PTR64_FORMAT ")@" PTR_FORMAT,
+                 "%s(%s)@" PTR_FORMAT,
                  AdapterHandlerEntry::name,
-                 fingerprint,
+                 fingerprint->as_string(),
                  B->instructions_begin());
     VTune::register_stub(blob_id, B->instructions_begin(), B->instructions_end());
     Forte::register_stub(blob_id, B->instructions_begin(), B->instructions_end());
@@ -1956,7 +2252,7 @@
                                                B->instructions_end());
     }
   }
-  return result;
+  return entry;
 }
 
 void AdapterHandlerEntry::relocate(address new_base) {
@@ -1966,6 +2262,44 @@
     _c2i_unverified_entry += delta;
 }
 
+
+void AdapterHandlerEntry::deallocate() {
+  delete _fingerprint;
+#ifdef ASSERT
+  if (_saved_code) FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
+  if (_saved_sig)  FREE_C_HEAP_ARRAY(BasicType, _saved_sig);
+#endif
+}
+
+
+#ifdef ASSERT
+// Capture the code before relocation so that it can be compared
+// against other versions.  If the code is captured after relocation
+// then relative instructions won't be equivalent.
+void AdapterHandlerEntry::save_code(unsigned char* buffer, int length, int total_args_passed, BasicType* sig_bt) {
+  _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length);
+  _code_length = length;
+  memcpy(_saved_code, buffer, length);
+  _total_args_passed = total_args_passed;
+  _saved_sig = NEW_C_HEAP_ARRAY(BasicType, _total_args_passed);
+  memcpy(_saved_sig, sig_bt, _total_args_passed * sizeof(BasicType));
+}
+
+
+bool AdapterHandlerEntry::compare_code(unsigned char* buffer, int length, int total_args_passed, BasicType* sig_bt) {
+  if (length != _code_length) {
+    return false;
+  }
+  for (int i = 0; i < length; i++) {
+    if (buffer[i] != _saved_code[i]) {
+      return false;
+    }
+  }
+  return true;
+}
+#endif
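
Taken together with the ASSERT-only hooks in get_adapter above, the sharing check works roughly like this (a sketch of the flow this changeset implements; debug builds with -XX:+VerifyAdapterSharing):

    // entry = _adapters->lookup(total_args_passed, sig_bt);  // cache hit
    // shared_entry = entry; entry = NULL;                    // force regeneration
    // entry = generate_i2c2i_adapters(...);                  // fresh code
    // assert(shared_entry->compare_code(...), "code must match");
    // _adapters->free_entry(entry);                          // discard the duplicate
    // return shared_entry;                                   // reuse the cached adapter
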
+
+
 // Create a native wrapper for this native method.  The wrapper converts the
 // java compiled calling convention to the native convention, handlizes
 // arguments, and transitions to native.  On return from the native we transition
@@ -2044,19 +2378,8 @@
     // CodeCache is full, disable compilation
     // Ought to log this but compile log is only per compile thread
     // and we're some non descript Java thread.
-    UseInterpreter = true;
-    if (UseCompiler || AlwaysCompileLoopMethods ) {
-#ifndef PRODUCT
-      warning("CodeCache is full. Compiler has been disabled");
-      if (CompileTheWorld || ExitOnFullCodeCache) {
-        before_exit(JavaThread::current());
-        exit_globals(); // will delete tty
-        vm_direct_exit(CompileTheWorld ? 0 : 1);
-      }
-#endif
-      UseCompiler               = false;
-      AlwaysCompileLoopMethods  = false;
-    }
+    MutexUnlocker mu(AdapterHandlerLibrary_lock);
+    CompileBroker::handle_full_code_cache();
   }
   return nm;
 }
@@ -2136,7 +2459,7 @@
   return regs.first();
 }
 
-VMRegPair *SharedRuntime::find_callee_arguments(symbolOop sig, bool is_static, int* arg_size) {
+VMRegPair *SharedRuntime::find_callee_arguments(symbolOop sig, bool has_receiver, int* arg_size) {
   // This method is returning a data structure allocating as a
   // ResourceObject, so do not put any ResourceMarks in here.
   char *s = sig->as_C_string();
@@ -2148,7 +2471,7 @@
   BasicType *sig_bt = NEW_RESOURCE_ARRAY( BasicType, 256 );
   VMRegPair *regs = NEW_RESOURCE_ARRAY( VMRegPair, 256 );
   int cnt = 0;
-  if (!is_static) {
+  if (has_receiver) {
     sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature
   }
 
@@ -2299,30 +2622,31 @@
 
 #ifndef PRODUCT
 bool AdapterHandlerLibrary::contains(CodeBlob* b) {
-
-  if (_handlers == NULL) return false;
-
-  for (int i = 0 ; i < _handlers->length() ; i++) {
-    AdapterHandlerEntry* a = get_entry(i);
-    if ( a != NULL && b == CodeCache::find_blob(a->get_i2c_entry()) ) return true;
+  AdapterHandlerTableIterator iter(_adapters);
+  while (iter.has_next()) {
+    AdapterHandlerEntry* a = iter.next();
+    if ( b == CodeCache::find_blob(a->get_i2c_entry()) ) return true;
   }
   return false;
 }
 
 void AdapterHandlerLibrary::print_handler(CodeBlob* b) {
-
-  for (int i = 0 ; i < _handlers->length() ; i++) {
-    AdapterHandlerEntry* a = get_entry(i);
-    if ( a != NULL && b == CodeCache::find_blob(a->get_i2c_entry()) ) {
+  AdapterHandlerTableIterator iter(_adapters);
+  while (iter.has_next()) {
+    AdapterHandlerEntry* a = iter.next();
+    if ( b == CodeCache::find_blob(a->get_i2c_entry()) ) {
       tty->print("Adapter for signature: ");
-      // Fingerprinter::print(_fingerprints->at(i));
-      tty->print("0x%" FORMAT64_MODIFIER "x", _fingerprints->at(i));
-      tty->print_cr(" i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iUV: " INTPTR_FORMAT,
+      tty->print_cr("%s i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iUV: " INTPTR_FORMAT,
+                    a->fingerprint()->as_string(),
                     a->get_i2c_entry(), a->get_c2i_entry(), a->get_c2i_unverified_entry());
-
       return;
     }
   }
   assert(false, "Should have found handler");
 }
+
+void AdapterHandlerLibrary::print_statistics() {
+  _adapters->print_statistics();
+}
+
 #endif /* PRODUCT */
--- a/src/share/vm/runtime/sharedRuntime.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/sharedRuntime.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,8 @@
  */
 
 class AdapterHandlerEntry;
+class AdapterHandlerTable;
+class AdapterFingerPrint;
 class vframeStream;
 
 // Runtime is the base class for various runtime interfaces
@@ -337,7 +339,8 @@
                                                       int total_args_passed,
                                                       int max_arg,
                                                       const BasicType *sig_bt,
-                                                      const VMRegPair *regs);
+                                                      const VMRegPair *regs,
+                                                      AdapterFingerPrint* fingerprint);
 
   // OSR support
 
@@ -357,7 +360,7 @@
 
   // Convert a sig into a calling convention register layout
   // and find interesting things about it.
-  static VMRegPair* find_callee_arguments(symbolOop sig, bool is_static, int *arg_size);
+  static VMRegPair* find_callee_arguments(symbolOop sig, bool has_receiver, int *arg_size);
   static VMReg     name_for_receiver();
 
   // "Top of Stack" slots that may be unused by the calling convention but must
@@ -528,28 +531,64 @@
 // used by the adapters.  The code generation happens here because it's very
 // similar to what the adapters have to do.
 
-class AdapterHandlerEntry : public CHeapObj {
+class AdapterHandlerEntry : public BasicHashtableEntry {
+  friend class AdapterHandlerTable;
+
  private:
+  AdapterFingerPrint* _fingerprint;
   address _i2c_entry;
   address _c2i_entry;
   address _c2i_unverified_entry;
 
+#ifdef ASSERT
+  // Captures code and signature used to generate this adapter when
+  // verifying adapter equivalence.
+  unsigned char* _saved_code;
+  int            _code_length;
+  BasicType*     _saved_sig;
+  int            _total_args_passed;
+#endif
+
+  void init(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry, address c2i_unverified_entry) {
+    _fingerprint = fingerprint;
+    _i2c_entry = i2c_entry;
+    _c2i_entry = c2i_entry;
+    _c2i_unverified_entry = c2i_unverified_entry;
+#ifdef ASSERT
+    _saved_code = NULL;
+    _code_length = 0;
+    _saved_sig = NULL;
+    _total_args_passed = 0;
+#endif
+  }
+
+  void deallocate();
+
+  // should never be used
+  AdapterHandlerEntry();
+
  public:
-
   // The name we give all buffer blobs
   static const char* name;
 
-  AdapterHandlerEntry(address i2c_entry, address c2i_entry, address c2i_unverified_entry):
-    _i2c_entry(i2c_entry),
-    _c2i_entry(c2i_entry),
-    _c2i_unverified_entry(c2i_unverified_entry) {
-  }
-
   address get_i2c_entry()            { return _i2c_entry; }
   address get_c2i_entry()            { return _c2i_entry; }
   address get_c2i_unverified_entry() { return _c2i_unverified_entry; }
 
   void relocate(address new_base);
+
+  AdapterFingerPrint* fingerprint()  { return _fingerprint; }
+
+  AdapterHandlerEntry* next() {
+    return (AdapterHandlerEntry*)BasicHashtableEntry::next();
+  }
+
+#ifdef ASSERT
+  // Used to verify that code generated for shared adapters is equivalent
+  void save_code(unsigned char* code, int length, int total_args_passed, BasicType* sig_bt);
+  bool compare_code(unsigned char* code, int length, int total_args_passed, BasicType* sig_bt);
+#endif
+
 #ifndef PRODUCT
   void print();
 #endif /* PRODUCT */
@@ -558,30 +597,18 @@
 class AdapterHandlerLibrary: public AllStatic {
  private:
   static BufferBlob* _buffer; // the temporary code buffer in CodeCache
-  static GrowableArray<uint64_t>* _fingerprints; // the fingerprint collection
-  static GrowableArray<AdapterHandlerEntry*> * _handlers; // the corresponding handlers
-  enum {
-    AbstractMethodHandler = 1 // special handler for abstract methods
-  };
+  static AdapterHandlerTable* _adapters;
+  static AdapterHandlerEntry* _abstract_method_handler;
   static BufferBlob* buffer_blob();
   static void initialize();
-  static int get_create_adapter_index(methodHandle method);
-  static address get_i2c_entry( int index ) {
-    return get_entry(index)->get_i2c_entry();
-  }
-  static address get_c2i_entry( int index ) {
-    return get_entry(index)->get_c2i_entry();
-  }
-  static address get_c2i_unverified_entry( int index ) {
-    return get_entry(index)->get_c2i_unverified_entry();
-  }
 
  public:
-  static AdapterHandlerEntry* get_entry( int index ) { return _handlers->at(index); }
+
+  static AdapterHandlerEntry* new_entry(AdapterFingerPrint* fingerprint,
+                                        address i2c_entry, address c2i_entry, address c2i_unverified_entry);
   static nmethod* create_native_wrapper(methodHandle method);
-  static AdapterHandlerEntry* get_adapter(methodHandle method)  {
-    return get_entry(get_create_adapter_index(method));
-  }
+  static AdapterHandlerEntry* get_adapter(methodHandle method);
+
 #ifdef HAVE_DTRACE_H
   static nmethod* create_dtrace_nmethod (methodHandle method);
 #endif // HAVE_DTRACE_H
@@ -589,6 +616,7 @@
 #ifndef PRODUCT
   static void print_handler(CodeBlob* b);
   static bool contains(CodeBlob* b);
+  static void print_statistics();
 #endif /* PRODUCT */
 
 };
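
The ASSERT-only _saved_code/_saved_sig fields support a byte-for-byte check that two methods sharing a fingerprint really receive equivalent adapter code. A minimal sketch of that check, assuming plain C++ containers rather than HotSpot's CHeap allocation:

    #include <cstring>
    #include <vector>

    // Sketch: the first adapter generated for a fingerprint saves its code;
    // a later method with an equivalent fingerprint regenerates the adapter
    // in a scratch buffer and verifies the bytes match.
    struct SavedAdapterCode {
      std::vector<unsigned char> code;
      void save(const unsigned char* p, int length) { code.assign(p, p + length); }
      bool compare(const unsigned char* p, int length) const {
        return length == (int)code.size() &&
               std::memcmp(p, code.data(), (size_t)length) == 0;
      }
    };
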
--- a/src/share/vm/runtime/statSampler.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/statSampler.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -177,7 +177,7 @@
 
   // public static String getProperty(String key, String def);
   JavaCalls::call_static(&result,
-                         KlassHandle(THREAD, SystemDictionary::system_klass()),
+                         KlassHandle(THREAD, SystemDictionary::System_klass()),
                          vmSymbolHandles::getProperty_name(),
                          vmSymbolHandles::string_string_signature(),
                          key_str,
--- a/src/share/vm/runtime/stubRoutines.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/stubRoutines.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -97,6 +97,14 @@
 address StubRoutines::_unsafe_arraycopy                  = NULL;
 address StubRoutines::_generic_arraycopy                 = NULL;
 
+double (* StubRoutines::_intrinsic_log   )(double) = NULL;
+double (* StubRoutines::_intrinsic_log10 )(double) = NULL;
+double (* StubRoutines::_intrinsic_exp   )(double) = NULL;
+double (* StubRoutines::_intrinsic_pow   )(double, double) = NULL;
+double (* StubRoutines::_intrinsic_sin   )(double) = NULL;
+double (* StubRoutines::_intrinsic_cos   )(double) = NULL;
+double (* StubRoutines::_intrinsic_tan   )(double) = NULL;
+
 // Initialization
 //
 // Note: to break cycle with universe initialization, stubs are generated in two phases.
@@ -188,11 +196,19 @@
 // Default versions of arraycopy functions
 //
 
+static void gen_arraycopy_barrier_pre(oop* dest, size_t count) {
+    assert(count != 0, "count should be non-zero");
+    assert(count <= (size_t)max_intx, "count too large");
+    BarrierSet* bs = Universe::heap()->barrier_set();
+    assert(bs->has_write_ref_array_pre_opt(), "Must have pre-barrier opt");
+    bs->write_ref_array_pre(dest, (int)count);
+}
+
 static void gen_arraycopy_barrier(oop* dest, size_t count) {
     assert(count != 0, "count should be non-zero");
     BarrierSet* bs = Universe::heap()->barrier_set();
     assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
-    bs->write_ref_array(MemRegion((HeapWord*)dest, (HeapWord*)(dest + count)));
+    bs->write_ref_array((HeapWord*)dest, count);
 }
 
 JRT_LEAF(void, StubRoutines::jbyte_copy(jbyte* src, jbyte* dest, size_t count))
@@ -232,6 +248,7 @@
   SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
 #endif // !PRODUCT
   assert(count != 0, "count should be non-zero");
+  gen_arraycopy_barrier_pre(dest, count);
   Copy::conjoint_oops_atomic(src, dest, count);
   gen_arraycopy_barrier(dest, count);
 JRT_END
@@ -273,6 +290,7 @@
   SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
 #endif // !PRODUCT
   assert(count != 0, "count should be non-zero");
+  gen_arraycopy_barrier_pre((oop *) dest, count);
   Copy::arrayof_conjoint_oops(src, dest, count);
   gen_arraycopy_barrier((oop *) dest, count);
 JRT_END
--- a/src/share/vm/runtime/stubRoutines.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/stubRoutines.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -148,6 +148,20 @@
   static address _unsafe_arraycopy;
   static address _generic_arraycopy;
 
+  // These are versions of the java.lang.Math methods which perform
+  // the same operations as the intrinsic version.  They are used for
+  // constant folding in the compiler to ensure equivalence.  If the
+  // intrinsic version returns the same result as the strict version
+  // then they can be set to the appropriate function from
+  // SharedRuntime.
+  static double (*_intrinsic_log)(double);
+  static double (*_intrinsic_log10)(double);
+  static double (*_intrinsic_exp)(double);
+  static double (*_intrinsic_pow)(double, double);
+  static double (*_intrinsic_sin)(double);
+  static double (*_intrinsic_cos)(double);
+  static double (*_intrinsic_tan)(double);
+
  public:
   // Initialization/Testing
   static void    initialize1();                            // must happen before universe::genesis
@@ -245,6 +259,35 @@
   static address unsafe_arraycopy()        { return _unsafe_arraycopy; }
   static address generic_arraycopy()       { return _generic_arraycopy; }
 
+  static double  intrinsic_log(double d) {
+    assert(_intrinsic_log != NULL, "must be defined");
+    return _intrinsic_log(d);
+  }
+  static double  intrinsic_log10(double d) {
+    assert(_intrinsic_log10 != NULL, "must be defined");
+    return _intrinsic_log10(d);
+  }
+  static double  intrinsic_exp(double d) {
+    assert(_intrinsic_exp != NULL, "must be defined");
+    return _intrinsic_exp(d);
+  }
+  static double  intrinsic_pow(double d, double d2) {
+    assert(_intrinsic_pow != NULL, "must be defined");
+    return _intrinsic_pow(d, d2);
+  }
+  static double  intrinsic_sin(double d) {
+    assert(_intrinsic_sin != NULL, "must be defined");
+    return _intrinsic_sin(d);
+  }
+  static double  intrinsic_cos(double d) {
+    assert(_intrinsic_cos != NULL, "must be defined");
+    return _intrinsic_cos(d);
+  }
+  static double  intrinsic_tan(double d) {
+    assert(_intrinsic_tan != NULL, "must be defined");
+    return _intrinsic_tan(d);
+  }
+
   //
   // Default versions of the above arraycopy functions for platforms which do
   // not have specialized versions
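
These pointers let the compiler constant-fold java.lang.Math calls only once a routine known to match the intrinsic has been registered; until then the assert guards against folding through an undefined routine. A hedged sketch of the indirection (strict_log and register_intrinsics are invented names):

    #include <cassert>
    #include <cmath>

    static double (*_intrinsic_log)(double) = nullptr;

    static double strict_log(double d) { return std::log(d); }

    // Called during startup once the platform routine is known to agree
    // with the intrinsic version; until then the pointer stays null and
    // the compiler must not constant-fold.
    static void register_intrinsics() { _intrinsic_log = strict_log; }

    static double intrinsic_log(double d) {
      assert(_intrinsic_log != nullptr && "must be defined");
      return _intrinsic_log(d);
    }
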
--- a/src/share/vm/runtime/sweeper.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/sweeper.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -33,6 +33,11 @@
 jint      NMethodSweeper::_locked_seen = 0;
 jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
 bool      NMethodSweeper::_rescan = false;
+bool      NMethodSweeper::_was_full = false;
+jint      NMethodSweeper::_advise_to_sweep = 0;
+jlong     NMethodSweeper::_last_was_full = 0;
+uint      NMethodSweeper::_highest_marked = 0;
+long      NMethodSweeper::_was_full_traversal = 0;
 
 class MarkActivationClosure: public CodeBlobClosure {
 public:
@@ -114,6 +119,40 @@
       tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
     }
   }
+
+  if (UseCodeCacheFlushing) {
+    if (!CodeCache::needs_flushing()) {
+      // In a safepoint, no race with setters
+      _advise_to_sweep = 0;
+    }
+
+    if (was_full()) {
+      // There was some progress so attempt to restart the compiler
+      jlong now           = os::javaTimeMillis();
+      jlong max_interval  = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
+      jlong curr_interval = now - _last_was_full;
+      if ((!CodeCache::needs_flushing()) && (curr_interval > max_interval)) {
+        CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
+        set_was_full(false);
+
+        // Update the _last_was_full time so we can tell how fast the
+        // code cache is filling up
+        _last_was_full = os::javaTimeMillis();
+
+        if (PrintMethodFlushing) {
+          tty->print_cr("### sweeper: Live blobs:" UINT32_FORMAT "/Free code cache:" SIZE_FORMAT " bytes, restarting compiler",
+            CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
+        }
+        if (LogCompilation && (xtty != NULL)) {
+          ttyLocker ttyl;
+          xtty->begin_elem("restart_compiler live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
+                           CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
+          xtty->stamp();
+          xtty->end_elem();
+        }
+      }
+    }
+  }
 }
 
 
@@ -137,12 +176,12 @@
     if (nm->is_marked_for_reclamation()) {
       assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
       if (PrintMethodFlushing && Verbose) {
-        tty->print_cr("### Nmethod 0x%x (marked for reclamation) being flushed", nm);
+        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
       }
       nm->flush();
     } else {
       if (PrintMethodFlushing && Verbose) {
-        tty->print_cr("### Nmethod 0x%x (zombie) being marked for reclamation", nm);
+        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
       }
       nm->mark_for_reclamation();
       _rescan = true;
@@ -152,7 +191,7 @@
     // stack we can safely convert it to a zombie method
     if (nm->can_not_entrant_be_converted()) {
       if (PrintMethodFlushing && Verbose) {
-        tty->print_cr("### Nmethod 0x%x (not entrant) being made zombie", nm);
+        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
       }
       nm->make_zombie();
       _rescan = true;
@@ -167,7 +206,7 @@
   } else if (nm->is_unloaded()) {
     // Unloaded code, just make it a zombie
     if (PrintMethodFlushing && Verbose)
-      tty->print_cr("### Nmethod 0x%x (unloaded) being made zombie", nm);
+      tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
     if (nm->is_osr_method()) {
       // No inline caches will ever point to osr methods, so we can just remove it
       nm->flush();
@@ -177,7 +216,167 @@
     }
   } else {
     assert(nm->is_alive(), "should be alive");
+
+    if (UseCodeCacheFlushing) {
+      if ((nm->method()->code() != nm) && !nm->is_locked_by_vm() && !nm->is_osr_method() &&
+          (_traversals > _was_full_traversal + 2) && ((uint)nm->compile_id() < _highest_marked) &&
+          CodeCache::needs_flushing()) {
+        // This method has not been called since the forced cleanup happened
+        nm->make_not_entrant();
+      }
+    }
+
     // Clean-up all inline caches that point to zombie/non-reentrant methods
     nm->cleanup_inline_caches();
   }
 }
+
+// Code cache unloading: when compilers notice the code cache is getting full,
+// they will call a vm op that comes here. This code attempts to speculatively
+// unload the oldest half of the nmethods (based on the compile job id) by
+// saving the old code in a list in the CodeCache. Then execution resumes.
+// If a method so marked is not called by the second safepoint from the
+// current one, the nmethod will be marked non-entrant and removed by
+// normal sweeping. If the method is called, the methodOop's _code field
+// is restored and the methodOop/nmethod go back to their normal state.
+void NMethodSweeper::handle_full_code_cache(bool is_full) {
+  // Only the first one to notice can advise us to start early cleaning
+  if (!is_full) {
+    jint old = Atomic::cmpxchg(1, &_advise_to_sweep, 0);
+    if (old != 0) {
+      return;
+    }
+  }
+
+  if (is_full) {
+    // Since code cache is full, immediately stop new compiles
+    bool did_set = CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
+    if (!did_set) {
+      // only the first to notice can start the cleaning,
+      // others will go back and block
+      return;
+    }
+    set_was_full(true);
+
+    // If we run out within MinCodeCacheFlushingInterval of the last unload time, give up
+    jlong now = os::javaTimeMillis();
+    jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
+    jlong curr_interval = now - _last_was_full;
+    if (curr_interval < max_interval) {
+      _rescan = true;
+      if (PrintMethodFlushing) {
+        tty->print_cr("### handle full too often, turning off compiler");
+      }
+      if (LogCompilation && (xtty != NULL)) {
+        ttyLocker ttyl;
+        xtty->begin_elem("disable_compiler flushing_interval='" UINT64_FORMAT "' live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
+                         curr_interval/1000, CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
+        xtty->stamp();
+        xtty->end_elem();
+      }
+      return;
+    }
+  }
+
+  VM_HandleFullCodeCache op(is_full);
+  VMThread::execute(&op);
+
+  // rescan again as soon as possible
+  _rescan = true;
+}
+
+void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
+  // If there was a race in detecting full code cache, only run
+  // one vm op for it or keep the compiler shut off
+
+  debug_only(jlong start = os::javaTimeMillis();)
+
+  if ((!was_full()) && (is_full)) {
+    if (!CodeCache::needs_flushing()) {
+      if (PrintMethodFlushing) {
+        tty->print_cr("### sweeper: Live blobs:" UINT32_FORMAT "/Free code cache:" SIZE_FORMAT " bytes, restarting compiler",
+          CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
+      }
+      if (LogCompilation && (xtty != NULL)) {
+        ttyLocker ttyl;
+        xtty->begin_elem("restart_compiler live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
+                         CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
+        xtty->stamp();
+        xtty->end_elem();
+      }
+      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
+      return;
+    }
+  }
+
+  // Traverse the code cache trying to dump the oldest nmethods
+  uint curr_max_comp_id = CompileBroker::get_compilation_id();
+  uint flush_target = ((curr_max_comp_id - _highest_marked) >> 1) + _highest_marked;
+  if (PrintMethodFlushing && Verbose) {
+    tty->print_cr("### Cleaning code cache: Live blobs:" UINT32_FORMAT "/Free code cache:" SIZE_FORMAT " bytes",
+        CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
+  }
+  if (LogCompilation && (xtty != NULL)) {
+    ttyLocker ttyl;
+    xtty->begin_elem("start_cleaning_code_cache live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
+                      CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
+    xtty->stamp();
+    xtty->end_elem();
+  }
+
+  nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
+  jint disconnected = 0;
+  jint made_not_entrant  = 0;
+  while (nm != NULL) {
+    uint curr_comp_id = nm->compile_id();
+
+    // OSR methods cannot be flushed like this. Also, don't flush native methods
+    // since they are part of the JDK in most cases
+    if (nm->is_in_use() && !nm->is_osr_method() && !nm->is_locked_by_vm() &&
+        !nm->is_native_method() && (curr_comp_id < flush_target)) {
+
+      if (nm->method()->code() == nm) {
+        // This method has not been previously considered for
+        // unloading or it was restored already
+        CodeCache::speculatively_disconnect(nm);
+        disconnected++;
+      } else if (nm->is_speculatively_disconnected()) {
+        // This method was previously considered for preemptive unloading and has not been called since then
+        nm->method()->invocation_counter()->decay();
+        nm->method()->backedge_counter()->decay();
+        nm->make_not_entrant();
+        made_not_entrant++;
+      }
+
+      if (curr_comp_id > _highest_marked) {
+        _highest_marked = curr_comp_id;
+      }
+    }
+    nm = CodeCache::alive_nmethod(CodeCache::next(nm));
+  }
+
+  if (LogCompilation && (xtty != NULL)) {
+    ttyLocker ttyl;
+    xtty->begin_elem("stop_cleaning_code_cache disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "' live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
+                      disconnected, made_not_entrant, CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
+    xtty->stamp();
+    xtty->end_elem();
+  }
+
+  // Shut off the compiler. The sweeper will run on exit from this safepoint
+  // and turn it back on if it clears enough space.
+  if (was_full()) {
+    _last_was_full = os::javaTimeMillis();
+    CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
+  }
+
+  // After two more traversals the sweeper will get rid of unrestored nmethods
+  _was_full_traversal = _traversals;
+#ifdef ASSERT
+  jlong end = os::javaTimeMillis();
+  if (PrintMethodFlushing && Verbose) {
+    tty->print_cr("### sweeper: unload time: " INT64_FORMAT, end-start);
+  }
+#endif
+}
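
The victim selection in speculative_disconnect_nmethods is a midpoint rule over compile ids: everything older than halfway between the previous high-water mark and the newest compile id becomes a candidate. The arithmetic in isolation (illustrative names, assuming newest_id >= _highest_marked as in the code above):

    #include <cstdint>

    // Midpoint rule: candidates are nmethods whose compile id falls in the
    // older half of the range (_highest_marked, newest_id).
    static uint32_t _highest_marked = 0;

    static bool is_flush_candidate(uint32_t compile_id, uint32_t newest_id) {
      uint32_t flush_target = ((newest_id - _highest_marked) >> 1) + _highest_marked;
      return compile_id < flush_target;
    }
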
--- a/src/share/vm/runtime/sweeper.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/sweeper.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -38,6 +38,11 @@
   static int       _locked_seen;     // Number of locked nmethods encountered during the scan
   static int       _not_entrant_seen_on_stack; // Number of not entrant nmethods that are still on stack
 
+  static bool      _was_full;          // remember if we did emergency unloading
+  static jint      _advise_to_sweep;   // flag to indicate code cache getting full
+  static jlong     _last_was_full;     // timestamp of last emergency unloading
+  static uint      _highest_marked;    // highest compile id dumped at last emergency unloading
+  static long      _was_full_traversal; // traversal number at last emergency unloading
 
   static void process_nmethod(nmethod *nm);
  public:
@@ -51,4 +56,10 @@
     // changes to false at safepoint so we can never overwrite it with false.
      _rescan = true;
   }
+
+  static void handle_full_code_cache(bool is_full);           // Called by compilers that fail to allocate
+  static void speculative_disconnect_nmethods(bool was_full); // Called by vm op to deal with alloc failure
+
+  static void set_was_full(bool state) { _was_full = state; }
+  static bool was_full() { return _was_full; }
 };
--- a/src/share/vm/runtime/thread.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/thread.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -884,6 +884,22 @@
                                          vmSymbolHandles::void_method_signature(), CHECK);
 }
 
+#ifdef KERNEL
+static void set_jkernel_boot_classloader_hook(TRAPS) {
+  klassOop k = SystemDictionary::sun_jkernel_DownloadManager_klass();
+  if (k == NULL) {
+    // sun.jkernel.DownloadManager may not be present in the JDK; just return
+    return;
+  }
+  instanceKlassHandle klass(THREAD, k);
+
+  JavaValue result(T_VOID);
+  JavaCalls::call_static(&result, klass, vmSymbolHandles::setBootClassLoaderHook_name(),
+                                         vmSymbolHandles::void_method_signature(), CHECK);
+}
+#endif // KERNEL
+
 static void reset_vm_info_property(TRAPS) {
   // the vm info string
   ResourceMark rm(THREAD);
@@ -957,7 +973,7 @@
     return;
   }
 
-  KlassHandle group(this, SystemDictionary::threadGroup_klass());
+  KlassHandle group(this, SystemDictionary::ThreadGroup_klass());
   Handle threadObj(this, this->threadObj());
 
   JavaCalls::call_special(&result,
@@ -975,6 +991,7 @@
 // uniquely named instances should derive from this.
 NamedThread::NamedThread() : Thread() {
   _name = NULL;
+  _processed_thread = NULL;
 }
 
 NamedThread::~NamedThread() {
@@ -1156,6 +1173,7 @@
   _exception_handler_pc = 0;
   _exception_stack_size = 0;
   _jvmti_thread_state= NULL;
+  _should_post_on_exceptions_flag = JNI_FALSE;
   _jvmti_get_loaded_classes_closure = NULL;
   _interp_only_mode    = 0;
   _special_runtime_exit_condition = _no_async_condition;
@@ -1451,7 +1469,7 @@
         // so call ThreadGroup.uncaughtException()
         KlassHandle recvrKlass(THREAD, threadObj->klass());
         CallInfo callinfo;
-        KlassHandle thread_klass(THREAD, SystemDictionary::thread_klass());
+        KlassHandle thread_klass(THREAD, SystemDictionary::Thread_klass());
         LinkResolver::resolve_virtual_call(callinfo, threadObj, recvrKlass, thread_klass,
                                            vmSymbolHandles::dispatchUncaughtException_name(),
                                            vmSymbolHandles::throwable_void_signature(),
@@ -1467,7 +1485,7 @@
                                   uncaught_exception,
                                   THREAD);
         } else {
-          KlassHandle thread_group(THREAD, SystemDictionary::threadGroup_klass());
+          KlassHandle thread_group(THREAD, SystemDictionary::ThreadGroup_klass());
           JavaValue result(T_VOID);
           JavaCalls::call_virtual(&result,
                                   group, thread_group,
@@ -1488,7 +1506,7 @@
       while (java_lang_Thread::threadGroup(threadObj()) != NULL && (count-- > 0)) {
         EXCEPTION_MARK;
         JavaValue result(T_VOID);
-        KlassHandle thread_klass(THREAD, SystemDictionary::thread_klass());
+        KlassHandle thread_klass(THREAD, SystemDictionary::Thread_klass());
         JavaCalls::call_virtual(&result,
                               threadObj, thread_klass,
                               vmSymbolHandles::exit_method_name(),
@@ -1726,7 +1744,7 @@
   // Check for pending async. exception
   if (_pending_async_exception != NULL) {
     // Only overwrite an already pending exception, if it is not a threadDeath.
-    if (!has_pending_exception() || !pending_exception()->is_a(SystemDictionary::threaddeath_klass())) {
+    if (!has_pending_exception() || !pending_exception()->is_a(SystemDictionary::ThreadDeath_klass())) {
 
       // We cannot call Exceptions::_throw(...) here because we cannot block
       set_pending_exception(_pending_async_exception, __FILE__, __LINE__);
@@ -1835,14 +1853,14 @@
   if (is_Compiler_thread()) return;
 
   // This is a change from JDK 1.1, but JDK 1.2 will also do it:
-  if (java_throwable->is_a(SystemDictionary::threaddeath_klass())) {
+  if (java_throwable->is_a(SystemDictionary::ThreadDeath_klass())) {
     java_lang_Thread::set_stillborn(threadObj());
   }
 
   {
     // Actually throw the Throwable against the target Thread - however
     // only if there is no thread death exception installed already.
-    if (_pending_async_exception == NULL || !_pending_async_exception->is_a(SystemDictionary::threaddeath_klass())) {
+    if (_pending_async_exception == NULL || !_pending_async_exception->is_a(SystemDictionary::ThreadDeath_klass())) {
       // If the topmost frame is a runtime stub, then we are calling into
       // OptoRuntime from compiled code. Some runtime stubs (new, monitor_exit..)
       // must deoptimize the caller before continuing, as the compiled  exception handler table
@@ -2317,11 +2335,31 @@
   frames_do(frame_gc_prologue);
 }
 
+// If the caller is a NamedThread, then remember, in the current scope,
+// the given JavaThread in its _processed_thread field.
+class RememberProcessedThread: public StackObj {
+  NamedThread* _cur_thr;
+public:
+  RememberProcessedThread(JavaThread* jthr) {
+    Thread* thread = Thread::current();
+    if (thread->is_Named_thread()) {
+      _cur_thr = (NamedThread *)thread;
+      _cur_thr->set_processed_thread(jthr);
+    } else {
+      _cur_thr = NULL;
+    }
+  }
+
+  ~RememberProcessedThread() {
+    if (_cur_thr) {
+      _cur_thr->set_processed_thread(NULL);
+    }
+  }
+};
 
 void JavaThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
-  // Flush deferred store-barriers, if any, associated with
-  // initializing stores done by this JavaThread in the current epoch.
-  Universe::heap()->flush_deferred_store_barrier(this);
+  // Verify that the deferred card marks have been flushed.
+  assert(deferred_card_mark().is_empty(), "Should be empty during GC");
 
   // The ThreadProfiler oops_do is done from FlatProfiler::oops_do
   // since there may be more than one thread using each ThreadProfiler.
@@ -2333,6 +2371,8 @@
           (has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
 
   if (has_last_Java_frame()) {
+    // Record, for the scope of the scan, the JavaThread being processed
+    RememberProcessedThread rpt(this);
 
     // Traverse the privileged stack
     if (_privileged_stack_top != NULL) {
@@ -3055,6 +3095,12 @@
       warning("java.lang.ArithmeticException has not been initialized");
       warning("java.lang.StackOverflowError has not been initialized");
     }
+
+    if (EnableInvokeDynamic) {
+      // JSR 292: An initialized java.dyn.InvokeDynamic is required by
+      // the compiler.
+      initialize_class(vmSymbolHandles::java_dyn_InvokeDynamic(), CHECK_0);
+    }
   }
 
   // See        : bugid 4211085.
@@ -3102,6 +3148,12 @@
     vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
   }
 
+#ifdef KERNEL
+  if (JDK_Version::is_gte_jdk17x_version()) {
+    set_jkernel_boot_classloader_hook(THREAD);
+  }
+#endif // KERNEL
+
 #ifndef SERIALGC
   // Support for ConcurrentMarkSweep. This should be cleaned up
   // and better encapsulated. The ugly nested if test would go away
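
RememberProcessedThread, defined in the thread.cpp hunk above, is a plain RAII scope guard: _processed_thread is set for exactly the duration of oops_do and cleared on every exit path, so a crash during the scan can report which JavaThread was being processed. The same shape in isolation (types are stand-ins):

    // Stand-in types: a worker records which item it is processing so a
    // crash during the scan can report it; the destructor clears the slot
    // even on early return.
    struct Worker {
      const void* processed;
    };

    class ScopedProcessedRecord {
      Worker* _w;
     public:
      ScopedProcessedRecord(Worker* w, const void* item) : _w(w) {
        if (_w != nullptr) _w->processed = item;   // set on scope entry
      }
      ~ScopedProcessedRecord() {
        if (_w != nullptr) _w->processed = nullptr; // cleared on scope exit
      }
    };
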
--- a/src/share/vm/runtime/thread.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/thread.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -48,7 +48,12 @@
 
 // Class hierarchy
 // - Thread
-//   - VMThread
+//   - NamedThread
+//     - VMThread
+//     - ConcurrentGCThread
+//     - WorkerThread
+//       - GangWorker
+//       - GCTaskThread
 //   - JavaThread
 //   - WatcherThread
 
@@ -249,6 +254,7 @@
   virtual bool is_GC_task_thread() const             { return false; }
   virtual bool is_Watcher_thread() const             { return false; }
   virtual bool is_ConcurrentGC_thread() const        { return false; }
+  virtual bool is_Named_thread() const               { return false; }
 
   virtual char* name() const { return (char*)"Unknown thread"; }
 
@@ -568,12 +574,18 @@
   };
  private:
   char* _name;
+  // log JavaThread being processed by oops_do
+  JavaThread* _processed_thread;
+
  public:
   NamedThread();
   ~NamedThread();
   // May only be called once per thread.
   void set_name(const char* format, ...);
+  virtual bool is_Named_thread() const { return true; }
   virtual char* name() const { return _name == NULL ? (char*)"Unknown Thread" : _name; }
+  JavaThread* processed_thread() { return _processed_thread; }
+  void set_processed_thread(JavaThread* thread) { _processed_thread = thread; }
 };
 
 // Worker threads are named and have an id of an assigned work.
@@ -760,6 +772,7 @@
   volatile address _exception_pc;                // PC where exception happened
   volatile address _exception_handler_pc;        // PC for handler of exception
   volatile int     _exception_stack_size;        // Size of frame where exception happened
+  volatile int     _is_method_handle_exception;  // True if the current exception PC is at a MethodHandle call.
 
   // support for compilation
   bool    _is_compiling;                         // is true if a compilation is active in this thread (one compilation per thread possible)
@@ -1095,11 +1108,13 @@
   int      exception_stack_size() const          { return _exception_stack_size; }
   address  exception_pc() const                  { return _exception_pc; }
   address  exception_handler_pc() const          { return _exception_handler_pc; }
+  int      is_method_handle_exception() const    { return _is_method_handle_exception; }
 
   void set_exception_oop(oop o)                  { _exception_oop = o; }
   void set_exception_pc(address a)               { _exception_pc = a; }
   void set_exception_handler_pc(address a)       { _exception_handler_pc = a; }
   void set_exception_stack_size(int size)        { _exception_stack_size = size; }
+  void set_is_method_handle_exception(int value) { _is_method_handle_exception = value; }
 
   // Stack overflow support
   inline size_t stack_available(address cur_sp);
@@ -1173,10 +1188,14 @@
   static ByteSize exception_pc_offset()          { return byte_offset_of(JavaThread, _exception_pc        ); }
   static ByteSize exception_handler_pc_offset()  { return byte_offset_of(JavaThread, _exception_handler_pc); }
   static ByteSize exception_stack_size_offset()  { return byte_offset_of(JavaThread, _exception_stack_size); }
+  static ByteSize is_method_handle_exception_offset() { return byte_offset_of(JavaThread, _is_method_handle_exception); }
   static ByteSize stack_guard_state_offset()     { return byte_offset_of(JavaThread, _stack_guard_state   ); }
   static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags       ); }
 
   static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
+  static ByteSize should_post_on_exceptions_flag_offset() {
+    return byte_offset_of(JavaThread, _should_post_on_exceptions_flag);
+  }
 
 #ifndef SERIALGC
   static ByteSize satb_mark_queue_offset()       { return byte_offset_of(JavaThread, _satb_mark_queue); }
@@ -1416,6 +1435,16 @@
   void increment_interp_only_mode()         { ++_interp_only_mode; }
   void decrement_interp_only_mode()         { --_interp_only_mode; }
 
+  // support for cached flag that indicates whether exceptions need to be posted for this thread
+  // if this is false, we can avoid deoptimizing when events are thrown
+  // this gets set to reflect whether jvmtiExport::post_exception_throw would actually do anything
+ private:
+  int    _should_post_on_exceptions_flag;
+
+ public:
+  int   should_post_on_exceptions_flag()  { return _should_post_on_exceptions_flag; }
+  void  set_should_post_on_exceptions_flag(int val)  { _should_post_on_exceptions_flag = val; }
+
  private:
   ThreadStatistics *_thread_stat;
 
--- a/src/share/vm/runtime/vframe.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/vframe.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -124,7 +124,7 @@
 static void print_locked_object_class_name(outputStream* st, Handle obj, const char* lock_state) {
   if (obj.not_null()) {
     st->print("\t- %s <" INTPTR_FORMAT "> ", lock_state, (address)obj());
-    if (obj->klass() == SystemDictionary::class_klass()) {
+    if (obj->klass() == SystemDictionary::Class_klass()) {
       klassOop target_klass = java_lang_Class::as_klassOop(obj());
       st->print_cr("(a java.lang.Class for %s)", instanceKlass::cast(target_klass)->external_name());
     } else {
@@ -430,8 +430,10 @@
       // This is Method.invoke() -- skip it
     } else if (use_new_reflection &&
               Klass::cast(method()->method_holder())
-                 ->is_subclass_of(SystemDictionary::reflect_method_accessor_klass())) {
+                 ->is_subclass_of(SystemDictionary::reflect_MethodAccessorImpl_klass())) {
       // This is an auxiliary frame -- skip it
+    } else if (method()->is_method_handle_adapter()) {
+      // This is an internal adapter frame from the MethodHandleCompiler -- skip it
     } else {
       // This is a non-excluded frame; we need to count it against the depth
       if (depth-- <= 0) {
@@ -490,8 +492,8 @@
 void vframeStreamCommon::skip_reflection_related_frames() {
   while (!at_end() &&
          (JDK_Version::is_gte_jdk14x_version() && UseNewReflection &&
-          (Klass::cast(method()->method_holder())->is_subclass_of(SystemDictionary::reflect_method_accessor_klass()) ||
-           Klass::cast(method()->method_holder())->is_subclass_of(SystemDictionary::reflect_constructor_accessor_klass())))) {
+          (Klass::cast(method()->method_holder())->is_subclass_of(SystemDictionary::reflect_MethodAccessorImpl_klass()) ||
+           Klass::cast(method()->method_holder())->is_subclass_of(SystemDictionary::reflect_ConstructorAccessorImpl_klass())))) {
     next();
   }
 }
--- a/src/share/vm/runtime/vframeArray.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/vframeArray.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -186,7 +186,7 @@
   int popframe_preserved_args_size_in_bytes = 0;
   int popframe_preserved_args_size_in_words = 0;
   if (is_top_frame) {
-  JvmtiThreadState *state = thread->jvmti_thread_state();
+    JvmtiThreadState *state = thread->jvmti_thread_state();
     if (JvmtiExport::can_pop_frame() &&
         (thread->has_pending_popframe() || thread->popframe_forcing_deopt_reexecution())) {
       if (thread->has_pending_popframe()) {
@@ -381,7 +381,6 @@
     RegisterMap map(thread);
     vframe* f = vframe::new_vframe(iframe(), &map, thread);
     f->print();
-    iframe()->interpreter_frame_print_on(tty);
 
     tty->print_cr("locals size     %d", locals()->size());
     tty->print_cr("expression size %d", expressions()->size());
@@ -582,7 +581,7 @@
 }
 
 void vframeArrayElement::print(outputStream* st) {
-  st->print_cr(" - interpreter_frame -> sp: ", INTPTR_FORMAT, iframe()->sp());
+  st->print_cr(" - interpreter_frame -> sp: " INTPTR_FORMAT, iframe()->sp());
 }
 
 void vframeArray::print_value_on(outputStream* st) const {
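
The print_cr fix in vframeArrayElement::print is easy to miss: INTPTR_FORMAT is a string literal that must be concatenated into the format string, and the stray comma in the old code passed it as a printf argument instead, leaving the format with no conversion for sp. A standalone demonstration (PRIxPTR approximates HotSpot's macro):

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    #define INTPTR_FORMAT "0x%" PRIxPTR   // approximation of HotSpot's macro

    static void print_sp(uintptr_t sp) {
      // Correct: adjacent string literals concatenate into one format string.
      std::printf(" - interpreter_frame -> sp: " INTPTR_FORMAT "\n", sp);
      // Broken (the old code): "...sp: ", INTPTR_FORMAT, sp -- the format
      // string carries no conversion, and sp is silently ignored.
    }
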
--- a/src/share/vm/runtime/vmStructs.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/vmStructs.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -309,6 +309,7 @@
   nonstatic_field(CollectedHeap,               _reserved,                                     MemRegion)                             \
   nonstatic_field(SharedHeap,                  _perm_gen,                                     PermGen*)                              \
   nonstatic_field(CollectedHeap,               _barrier_set,                                  BarrierSet*)                           \
+  nonstatic_field(CollectedHeap,               _defer_initial_card_mark,                      bool)                                  \
   nonstatic_field(CollectedHeap,               _is_gc_active,                                 bool)                                  \
   nonstatic_field(CompactibleSpace,            _compaction_top,                               HeapWord*)                             \
   nonstatic_field(CompactibleSpace,            _first_dead,                                   HeapWord*)                             \
@@ -455,40 +456,38 @@
       static_field(SystemDictionary,            _shared_dictionary,                            Dictionary*)                          \
       static_field(SystemDictionary,            _system_loader_lock_obj,                       oop)                                  \
       static_field(SystemDictionary,            _loader_constraints,                           LoaderConstraintTable*)               \
-      static_field(SystemDictionary,            WK_KLASS(object_klass),                        klassOop)                             \
-      static_field(SystemDictionary,            WK_KLASS(string_klass),                        klassOop)                             \
-      static_field(SystemDictionary,            WK_KLASS(class_klass),                         klassOop)                             \
-      static_field(SystemDictionary,            WK_KLASS(cloneable_klass),                     klassOop)                             \
-      static_field(SystemDictionary,            WK_KLASS(classloader_klass),                   klassOop)                             \
-      static_field(SystemDictionary,            WK_KLASS(serializable_klass),                  klassOop)                             \
-      static_field(SystemDictionary,            WK_KLASS(system_klass),                        klassOop)                             \
-      static_field(SystemDictionary,            WK_KLASS(throwable_klass),                     klassOop)                             \
-      static_field(SystemDictionary,            WK_KLASS(threaddeath_klass),                   klassOop)                             \
-      static_field(SystemDictionary,            WK_KLASS(error_klass),                         klassOop)                             \
-      static_field(SystemDictionary,            WK_KLASS(exception_klass),                     klassOop)                             \
-      static_field(SystemDictionary,            WK_KLASS(runtime_exception_klass),             klassOop)                             \
-      static_field(SystemDictionary,            WK_KLASS(classNotFoundException_klass),        klassOop)                             \
-      static_field(SystemDictionary,            WK_KLASS(noClassDefFoundError_klass),          klassOop)                             \
-      static_field(SystemDictionary,            WK_KLASS(linkageError_klass),                  klassOop)                             \
+      static_field(SystemDictionary,            WK_KLASS(Object_klass),                        klassOop)                             \
+      static_field(SystemDictionary,            WK_KLASS(String_klass),                        klassOop)                             \
+      static_field(SystemDictionary,            WK_KLASS(Class_klass),                         klassOop)                             \
+      static_field(SystemDictionary,            WK_KLASS(Cloneable_klass),                     klassOop)                             \
+      static_field(SystemDictionary,            WK_KLASS(ClassLoader_klass),                   klassOop)                             \
+      static_field(SystemDictionary,            WK_KLASS(Serializable_klass),                  klassOop)                             \
+      static_field(SystemDictionary,            WK_KLASS(System_klass),                        klassOop)                             \
+      static_field(SystemDictionary,            WK_KLASS(Throwable_klass),                     klassOop)                             \
+      static_field(SystemDictionary,            WK_KLASS(ThreadDeath_klass),                   klassOop)                             \
+      static_field(SystemDictionary,            WK_KLASS(Error_klass),                         klassOop)                             \
+      static_field(SystemDictionary,            WK_KLASS(Exception_klass),                     klassOop)                             \
+      static_field(SystemDictionary,            WK_KLASS(RuntimeException_klass),              klassOop)                             \
+      static_field(SystemDictionary,            WK_KLASS(ClassNotFoundException_klass),        klassOop)                             \
+      static_field(SystemDictionary,            WK_KLASS(NoClassDefFoundError_klass),          klassOop)                             \
+      static_field(SystemDictionary,            WK_KLASS(LinkageError_klass),                  klassOop)                             \
       static_field(SystemDictionary,            WK_KLASS(ClassCastException_klass),            klassOop)                             \
       static_field(SystemDictionary,            WK_KLASS(ArrayStoreException_klass),           klassOop)                             \
-      static_field(SystemDictionary,            WK_KLASS(virtualMachineError_klass),           klassOop)                             \
+      static_field(SystemDictionary,            WK_KLASS(VirtualMachineError_klass),           klassOop)                             \
       static_field(SystemDictionary,            WK_KLASS(OutOfMemoryError_klass),              klassOop)                             \
       static_field(SystemDictionary,            WK_KLASS(StackOverflowError_klass),            klassOop)                             \
-      static_field(SystemDictionary,            WK_KLASS(protectionDomain_klass),              klassOop)                             \
+      static_field(SystemDictionary,            WK_KLASS(ProtectionDomain_klass),              klassOop)                             \
       static_field(SystemDictionary,            WK_KLASS(AccessControlContext_klass),          klassOop)                             \
-      static_field(SystemDictionary,            WK_KLASS(reference_klass),                     klassOop)                             \
-      static_field(SystemDictionary,            WK_KLASS(soft_reference_klass),                klassOop)                             \
-      static_field(SystemDictionary,            WK_KLASS(weak_reference_klass),                klassOop)                             \
-      static_field(SystemDictionary,            WK_KLASS(final_reference_klass),               klassOop)                             \
-      static_field(SystemDictionary,            WK_KLASS(phantom_reference_klass),             klassOop)                             \
-      static_field(SystemDictionary,            WK_KLASS(finalizer_klass),                     klassOop)                             \
-      static_field(SystemDictionary,            WK_KLASS(thread_klass),                        klassOop)                             \
-      static_field(SystemDictionary,            WK_KLASS(threadGroup_klass),                   klassOop)                             \
-      static_field(SystemDictionary,            WK_KLASS(properties_klass),                    klassOop)                             \
-      static_field(SystemDictionary,            WK_KLASS(stringBuffer_klass),                  klassOop)                             \
-      static_field(SystemDictionary,            WK_KLASS(vector_klass),                        klassOop)                             \
-      static_field(SystemDictionary,            WK_KLASS(hashtable_klass),                     klassOop)                             \
+      static_field(SystemDictionary,            WK_KLASS(Reference_klass),                     klassOop)                             \
+      static_field(SystemDictionary,            WK_KLASS(SoftReference_klass),                 klassOop)                             \
+      static_field(SystemDictionary,            WK_KLASS(WeakReference_klass),                 klassOop)                             \
+      static_field(SystemDictionary,            WK_KLASS(FinalReference_klass),                klassOop)                             \
+      static_field(SystemDictionary,            WK_KLASS(PhantomReference_klass),              klassOop)                             \
+      static_field(SystemDictionary,            WK_KLASS(Finalizer_klass),                     klassOop)                             \
+      static_field(SystemDictionary,            WK_KLASS(Thread_klass),                        klassOop)                             \
+      static_field(SystemDictionary,            WK_KLASS(ThreadGroup_klass),                   klassOop)                             \
+      static_field(SystemDictionary,            WK_KLASS(Properties_klass),                    klassOop)                             \
+      static_field(SystemDictionary,            WK_KLASS(StringBuffer_klass),                  klassOop)                             \
       static_field(SystemDictionary,            _box_klasses[0],                               klassOop)                             \
       static_field(SystemDictionary,            _java_system_loader,                           oop)                                  \
                                                                                                                                      \
@@ -666,6 +665,7 @@
   nonstatic_field(Thread,                      _current_pending_monitor_is_from_java,         bool)                                  \
   nonstatic_field(Thread,                      _current_waiting_monitor,                      ObjectMonitor*)                        \
   nonstatic_field(NamedThread,                 _name,                                         char*)                                 \
+  nonstatic_field(NamedThread,                 _processed_thread,                             JavaThread*)                           \
   nonstatic_field(JavaThread,                  _next,                                         JavaThread*)                           \
   nonstatic_field(JavaThread,                  _threadObj,                                    oop)                                   \
   nonstatic_field(JavaThread,                  _anchor,                                       JavaFrameAnchor)                       \
--- a/src/share/vm/runtime/vmThread.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/vmThread.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -204,8 +204,8 @@
 }
 
 
-VMThread::VMThread() : Thread() {
-  // nothing to do
+VMThread::VMThread() : NamedThread() {
+  set_name("VM Thread");
 }
 
 void VMThread::destroy() {
@@ -426,11 +426,6 @@
       // follow that also require a safepoint
       if (_cur_vm_operation->evaluate_at_safepoint()) {
 
-        if (PrintGCApplicationConcurrentTime) {
-           gclog_or_tty->print_cr("Application time: %3.7f seconds",
-                                  RuntimeService::last_application_time_sec());
-        }
-
         _vm_queue->set_drain_list(safepoint_ops); // ensure ops can be scanned
 
         SafepointSynchronize::begin();
@@ -477,12 +472,6 @@
         // Complete safepoint synchronization
         SafepointSynchronize::end();
 
-        if (PrintGCApplicationStoppedTime) {
-          gclog_or_tty->print_cr("Total time for which application threads "
-                                 "were stopped: %3.7f seconds",
-                                 RuntimeService::last_safepoint_time_sec());
-        }
-
       } else {  // not a safepoint operation
         if (TraceLongCompiles) {
           elapsedTimer t;
--- a/src/share/vm/runtime/vmThread.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/vmThread.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -83,7 +83,7 @@
 // like scavenge, garbage_collect etc.
 //
 
-class VMThread: public Thread {
+class VMThread: public NamedThread {
  private:
   static ThreadPriority _current_priority;
 
@@ -101,8 +101,6 @@
   bool is_VM_thread() const                      { return true; }
   bool is_GC_thread() const                      { return true; }
 
-  char* name() const { return (char*)"VM Thread"; }
-
   // The ever running loop for the VMThread
   void loop();
 
--- a/src/share/vm/runtime/vm_operations.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/vm_operations.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -151,6 +151,10 @@
 
 #endif // !PRODUCT
 
+void VM_HandleFullCodeCache::doit() {
+  NMethodSweeper::speculative_disconnect_nmethods(_is_full);
+}
+
 void VM_Verify::doit() {
   Universe::verify();
 }
--- a/src/share/vm/runtime/vm_operations.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/runtime/vm_operations.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -41,6 +41,7 @@
   template(DeoptimizeFrame)                       \
   template(DeoptimizeAll)                         \
   template(ZombieAll)                             \
+  template(HandleFullCodeCache)                   \
   template(Verify)                                \
   template(PrintJNI)                              \
   template(HeapDumper)                            \
@@ -241,6 +242,16 @@
   bool allow_nested_vm_operations() const        { return true;  }
 };
 
+class VM_HandleFullCodeCache: public VM_Operation {
+ private:
+  bool  _is_full;
+ public:
+  VM_HandleFullCodeCache(bool is_full)           { _is_full = is_full; }
+  VMOp_Type type() const                         { return VMOp_HandleFullCodeCache; }
+  void doit();
+  bool allow_nested_vm_operations() const        { return true; }
+};
+
 #ifndef PRODUCT
 class VM_DeoptimizeAll: public VM_Operation {
  private:
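
VM_HandleFullCodeCache follows the usual vm-operation shape: a small command object handed to the VMThread, which runs doit() at a safepoint. Reduced to the pattern (stand-in classes, not HotSpot's VM_Operation hierarchy):

    // Command pattern: the requesting thread builds the operation and hands
    // it to a dedicated thread; doit() runs there.
    class VMOperationLike {
     public:
      virtual ~VMOperationLike() {}
      virtual void doit() = 0;
      virtual bool allow_nested_vm_operations() const { return false; }
    };

    class HandleFullCodeCacheOp : public VMOperationLike {
      bool _is_full;
     public:
      explicit HandleFullCodeCacheOp(bool is_full) : _is_full(is_full) {}
      void doit() {
        // would call NMethodSweeper::speculative_disconnect_nmethods(_is_full)
      }
      bool allow_nested_vm_operations() const { return true; }
    };
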
--- a/src/share/vm/services/attachListener.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/services/attachListener.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -207,7 +207,7 @@
     int tmp;
     int n = sscanf(arg1, "%d", &tmp);
     if (n != 1) {
-      out->print_cr("flag value has to be boolean (1 or 0)");
+      out->print_cr("flag value must be a boolean (1 or 0)");
       return JNI_ERR;
     }
     value = (tmp != 0);
@@ -226,11 +226,11 @@
   if ((arg1 = op->arg(1)) != NULL) {
     int n = sscanf(arg1, INTX_FORMAT, &value);
     if (n != 1) {
-      out->print_cr("flag value has to be integer");
+      out->print_cr("flag value must be an integer");
       return JNI_ERR;
     }
   }
-  bool res = CommandLineFlags::intxAtPut((char*)name,  &value, ATTACH_ON_DEMAND);
+  bool res = CommandLineFlags::intxAtPut((char*)name, &value, ATTACH_ON_DEMAND);
   if (! res) {
     out->print_cr("setting flag %s failed", name);
   }
@@ -245,11 +245,30 @@
   if ((arg1 = op->arg(1)) != NULL) {
     int n = sscanf(arg1, UINTX_FORMAT, &value);
     if (n != 1) {
-      out->print_cr("flag value has to be integer");
+      out->print_cr("flag value must be an unsigned integer");
       return JNI_ERR;
     }
   }
-  bool res = CommandLineFlags::uintxAtPut((char*)name,  &value, ATTACH_ON_DEMAND);
+  bool res = CommandLineFlags::uintxAtPut((char*)name, &value, ATTACH_ON_DEMAND);
+  if (!res) {
+    out->print_cr("setting flag %s failed", name);
+  }
+
+  return res ? JNI_OK : JNI_ERR;
+}
+
+// set a uint64_t global flag using value from AttachOperation
+static jint set_uint64_t_flag(const char* name, AttachOperation* op, outputStream* out) {
+  uint64_t value;
+  const char* arg1;
+  if ((arg1 = op->arg(1)) != NULL) {
+    int n = sscanf(arg1, UINT64_FORMAT, &value);
+    if (n != 1) {
+      out->print_cr("flag value must be an unsigned 64-bit integer");
+      return JNI_ERR;
+    }
+  }
+  bool res = CommandLineFlags::uint64_tAtPut((char*)name, &value, ATTACH_ON_DEMAND);
   if (! res) {
     out->print_cr("setting flag %s failed", name);
   }
@@ -261,10 +280,10 @@
 static jint set_ccstr_flag(const char* name, AttachOperation* op, outputStream* out) {
   const char* value;
   if ((value = op->arg(1)) == NULL) {
-    out->print_cr("flag value has to be a string");
+    out->print_cr("flag value must be a string");
     return JNI_ERR;
   }
-  bool res = CommandLineFlags::ccstrAtPut((char*)name,  &value, ATTACH_ON_DEMAND);
+  bool res = CommandLineFlags::ccstrAtPut((char*)name, &value, ATTACH_ON_DEMAND);
   if (res) {
     FREE_C_HEAP_ARRAY(char, value);
   } else {
@@ -291,6 +310,8 @@
       return set_intx_flag(name, op, out);
     } else if (f->is_uintx()) {
       return set_uintx_flag(name, op, out);
+    } else if (f->is_uint64_t()) {
+      return set_uint64_t_flag(name, op, out);
     } else if (f->is_ccstr()) {
       return set_ccstr_flag(name, op, out);
     } else {
@@ -416,7 +437,7 @@
                        string,
                        CHECK);
 
-  KlassHandle group(THREAD, SystemDictionary::threadGroup_klass());
+  KlassHandle group(THREAD, SystemDictionary::ThreadGroup_klass());
   JavaCalls::call_special(&result,
                         thread_group,
                         group,
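
set_uint64_t_flag mirrors the intx/uintx setters: scan the text argument with the format for the flag's declared type and reject anything that does not parse. The parsing step in isolation (SCNu64 standing in for UINT64_FORMAT):

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    // Returns true only if the whole scan succeeded; mirrors the
    // "n != 1 -> JNI_ERR" checks in the setters above.
    static bool parse_uint64_flag(const char* arg, uint64_t* value) {
      return arg != nullptr && std::sscanf(arg, "%" SCNu64, value) == 1;
    }
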
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/services/g1MemoryPool.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_g1MemoryPool.cpp.incl"
+
+G1MemoryPoolSuper::G1MemoryPoolSuper(G1CollectedHeap* g1h,
+                                     const char* name,
+                                     size_t init_size,
+                                     size_t max_size,
+                                     bool support_usage_threshold) :
+  _g1h(g1h), CollectedMemoryPool(name,
+                                 MemoryPool::Heap,
+                                 init_size,
+                                 max_size,
+                                 support_usage_threshold) {
+  assert(UseG1GC, "sanity");
+}
+
+// See the comment at the top of g1MemoryPool.hpp
+size_t G1MemoryPoolSuper::eden_space_committed(G1CollectedHeap* g1h) {
+  return MAX2(eden_space_used(g1h), (size_t) HeapRegion::GrainBytes);
+}
+
+// See the comment at the top of g1MemoryPool.hpp
+size_t G1MemoryPoolSuper::eden_space_used(G1CollectedHeap* g1h) {
+  size_t young_list_length = g1h->young_list_length();
+  size_t eden_used = young_list_length * HeapRegion::GrainBytes;
+  size_t survivor_used = survivor_space_used(g1h);
+  eden_used = subtract_up_to_zero(eden_used, survivor_used);
+  return eden_used;
+}
+
+// See the comment at the top of g1MemoryPool.hpp
+size_t G1MemoryPoolSuper::eden_space_max(G1CollectedHeap* g1h) {
+  // This should ensure that it returns a value no smaller than the
+  // region size. Currently, eden_space_committed() guarantees that.
+  return eden_space_committed(g1h);
+}
+
+// See the comment at the top of g1MemoryPool.hpp
+size_t G1MemoryPoolSuper::survivor_space_committed(G1CollectedHeap* g1h) {
+  return MAX2(survivor_space_used(g1h), (size_t) HeapRegion::GrainBytes);
+}
+
+// See the comment at the top of g1MemoryPool.hpp
+size_t G1MemoryPoolSuper::survivor_space_used(G1CollectedHeap* g1h) {
+  size_t survivor_num = g1h->g1_policy()->recorded_survivor_regions();
+  size_t survivor_used = survivor_num * HeapRegion::GrainBytes;
+  return survivor_used;
+}
+
+// See the comment at the top of g1MemoryPool.hpp
+size_t G1MemoryPoolSuper::survivor_space_max(G1CollectedHeap* g1h) {
+  // This should ensure that it returns a value no smaller than the
+  // region size. Currently, survivor_space_committed() guarantees that.
+  return survivor_space_committed(g1h);
+}
+
+// See the comment at the top of g1MemoryPool.hpp
+size_t G1MemoryPoolSuper::old_space_committed(G1CollectedHeap* g1h) {
+  size_t committed = overall_committed(g1h);
+  size_t eden_committed = eden_space_committed(g1h);
+  size_t survivor_committed = survivor_space_committed(g1h);
+  committed = subtract_up_to_zero(committed, eden_committed);
+  committed = subtract_up_to_zero(committed, survivor_committed);
+  committed = MAX2(committed, (size_t) HeapRegion::GrainBytes);
+  return committed;
+}
+
+// See the comment at the top of g1MemoryPool.hpp
+size_t G1MemoryPoolSuper::old_space_used(G1CollectedHeap* g1h) {
+  size_t used = overall_used(g1h);
+  size_t eden_used = eden_space_used(g1h);
+  size_t survivor_used = survivor_space_used(g1h);
+  used = subtract_up_to_zero(used, eden_used);
+  used = subtract_up_to_zero(used, survivor_used);
+  return used;
+}
+
+// See the comment at the top of g1MemoryPool.hpp
+size_t G1MemoryPoolSuper::old_space_max(G1CollectedHeap* g1h) {
+  size_t max = overall_max(g1h);
+  size_t eden_max = eden_space_max(g1h);
+  size_t survivor_max = survivor_space_max(g1h);
+  max = subtract_up_to_zero(max, eden_max);
+  max = subtract_up_to_zero(max, survivor_max);
+  max = MAX2(max, (size_t) HeapRegion::GrainBytes);
+  return max;
+}
+
+G1EdenPool::G1EdenPool(G1CollectedHeap* g1h) :
+  G1MemoryPoolSuper(g1h,
+                    "G1 Eden",
+                    eden_space_committed(g1h), /* init_size */
+                    eden_space_max(g1h), /* max_size */
+                    false /* support_usage_threshold */) {
+}
+
+MemoryUsage G1EdenPool::get_memory_usage() {
+  size_t initial_sz = initial_size();
+  size_t max_sz     = max_size();
+  size_t used       = used_in_bytes();
+  size_t committed  = eden_space_committed(_g1h);
+
+  return MemoryUsage(initial_sz, used, committed, max_sz);
+}
+
+G1SurvivorPool::G1SurvivorPool(G1CollectedHeap* g1h) :
+  G1MemoryPoolSuper(g1h,
+                    "G1 Survivor",
+                    survivor_space_committed(g1h), /* init_size */
+                    survivor_space_max(g1h), /* max_size */
+                    false /* support_usage_threshold */) {
+}
+
+MemoryUsage G1SurvivorPool::get_memory_usage() {
+  size_t initial_sz = initial_size();
+  size_t max_sz     = max_size();
+  size_t used       = used_in_bytes();
+  size_t committed  = survivor_space_committed(_g1h);
+
+  return MemoryUsage(initial_sz, used, committed, max_sz);
+}
+
+G1OldGenPool::G1OldGenPool(G1CollectedHeap* g1h) :
+  G1MemoryPoolSuper(g1h,
+                    "G1 Old Gen",
+                    old_space_committed(g1h), /* init_size */
+                    old_space_max(g1h), /* max_size */
+                    true /* support_usage_threshold */) {
+}
+
+MemoryUsage G1OldGenPool::get_memory_usage() {
+  size_t initial_sz = initial_size();
+  size_t max_sz     = max_size();
+  size_t used       = used_in_bytes();
+  size_t committed  = old_space_committed(_g1h);
+
+  return MemoryUsage(initial_sz, used, committed, max_sz);
+}
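The committed sizes reported by the pools above are clamped to at least one region (see eden_space_committed() / survivor_space_committed()). A standalone illustration of that MAX2-style clamp, with a hypothetical region size:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t grain_bytes = 1024 * 1024;           // hypothetical GrainBytes
      const size_t used_values[] = {0, 512 * 1024, 3 * 1024 * 1024};
      for (size_t used : used_values) {
        size_t committed = std::max(used, grain_bytes); // MAX2(used, GrainBytes)
        printf("used=%zu -> committed=%zu\n", used, committed);
      }
      return 0;
    }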
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/services/g1MemoryPool.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+class G1CollectedHeap;
+
+// This file contains the three classes that represent the memory
+// pools of the G1 spaces: G1EdenPool, G1SurvivorPool, and
+// G1OldGenPool. In G1, unlike our other GCs, we do not have a
+// physical space for each of those spaces. Instead, we allocate
+// regions for all three spaces out of a single pool of regions (that
+// pool basically covers the entire heap). As a result, the eden,
+// survivor, and old gen are considered logical spaces in G1, as each
+// is a set of non-contiguous regions. This is also reflected in the
+// way we map them to memory pools here. The easiest way to have done
+// this would have been to map the entire G1 heap to a single memory
+// pool. However, it's helpful to show how large the eden and survivor
+// spaces get, as this affects the performance and behavior of G1,
+// which is why we introduce the three memory pools implemented here.
+//
+// The above approach introduces a couple of challenging issues in the
+// implementation of the three memory pools:
+//
+// 1) The used space calculation for a pool is not necessarily
+// independent of the others. We can easily get from G1 the overall
+// used space in the entire heap, the number of regions in the young
+// generation (includes both eden and survivors), and the number of
+// survivor regions. So, from that we calculate:
+//
+//  survivor_used = survivor_num * region_size
+//  eden_used     = young_region_num * region_size - survivor_used
+//  old_gen_used  = overall_used - eden_used - survivor_used
+//
+// Note that survivor_used and eden_used are upper bounds. To get the
+// actual value we would have to iterate over the regions and add up
+// ->used(). But that'd be expensive. So, we'll accept some lack of
+// accuracy for those two. But, we have to be careful when calculating
+// old_gen_used, in case we subtract from overall_used more than the
+// actual amount and our result goes negative.
+//
+// 2) Calculating the used space is straightforward, as described
+// above. However, how do we calculate the committed space, given that
+// we allocate space for the eden, survivor, and old gen out of the
+// same pool of regions? One way to do this is to use the used value
+// also as the committed value for the eden and survivor spaces and
+// then calculate the old gen committed space as follows:
+//
+//  old_gen_committed = overall_committed - eden_committed - survivor_committed
+//
+// Maybe a better way to do that would be to calculate used for eden
+// and survivor as a sum of ->used() over their regions and then
+// calculate committed as region_num * region_size (i.e., what we use
+// to calculate the used space now). This is something to consider
+// in the future.
+//
+// 3) Another decision that is again not straightforward is the max
+// size that each memory pool can grow to. Right now, we use the
+// committed size as the max for the eden and the survivors, and
+// calculate the old gen max as follows (basically, it's a similar
+// pattern to what we use for the committed space, as described
+// above):
+//
+//  old_gen_max = overall_max - eden_max - survivor_max
+//
+// 4) Now, there is a very subtle issue with all the above. The
+// framework will call get_memory_usage() on the three pools
+// asynchronously. As a result, each call might get a different value
+// for, say, survivor_num which will yield inconsistent values for
+// eden_used, survivor_used, and old_gen_used (as survivor_num is used
+// in the calculation of all three). This would normally be
+// ok. However, it's possible that this might cause the sum of
+// eden_used, survivor_used, and old_gen_used to go over the max heap
+// size, and this seems to sometimes cause JConsole (and maybe other
+// clients) to get confused. There's not really an easy / clean
+// solution to this problem, due to the asynchronous nature of the
+// framework.
+
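Taken together, issues 1-3 above boil down to a few lines of defensive size_t arithmetic. A minimal standalone sketch (not HotSpot code) with hypothetical region counts and sizes:

    #include <cstddef>
    #include <cstdio>

    static size_t subtract_up_to_zero(size_t x, size_t y) {
      return (x > y) ? (x - y) : 0;  // clamp instead of wrapping around
    }

    int main() {
      const size_t region_size      = 1024 * 1024;      // hypothetical
      const size_t young_regions    = 10;               // eden + survivors
      const size_t survivor_regions = 2;
      const size_t overall_used     = 64 * region_size; // hypothetical snapshot

      size_t survivor_used = survivor_regions * region_size;
      size_t eden_used     = subtract_up_to_zero(young_regions * region_size,
                                                 survivor_used);
      size_t old_gen_used  = subtract_up_to_zero(
          subtract_up_to_zero(overall_used, eden_used), survivor_used);

      printf("eden=%zu survivor=%zu old=%zu\n",
             eden_used, survivor_used, old_gen_used);
      return 0;
    }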
+
+// This class is shared by the three G1 memory pool classes
+// (G1EdenPool, G1SurvivorPool, G1OldGenPool). Given that the way we
+// calculate used / committed bytes for these three pools is related
+// (see comment above), we put the calculations in this class so that
+// we can easily share them among the subclasses.
+class G1MemoryPoolSuper : public CollectedMemoryPool {
+private:
+  // It returns x - y if x > y, 0 otherwise.
+  // As described in the comment above, some of the inputs to the
+  // calculations we have to do are obtained concurrently and hence
+  // may be inconsistent with each other. So, this provides a
+  // defensive way of performing the subtraction and avoids the value
+  // going negative (which would mean a very large result, given that
+  // the parameters are size_t).
+  static size_t subtract_up_to_zero(size_t x, size_t y) {
+    if (x > y) {
+      return x - y;
+    } else {
+      return 0;
+    }
+  }
+
+protected:
+  G1CollectedHeap* _g1h;
+
+  // Would only be called from subclasses.
+  G1MemoryPoolSuper(G1CollectedHeap* g1h,
+                    const char* name,
+                    size_t init_size,
+                    size_t max_size,
+                    bool support_usage_threshold);
+
+  // The reason why all the code is in static methods is so that it
+  // can be safely called from the constructors of the subclasses.
+
+  static size_t overall_committed(G1CollectedHeap* g1h) {
+    return g1h->capacity();
+  }
+  static size_t overall_used(G1CollectedHeap* g1h) {
+    return g1h->used_unlocked();
+  }
+  static size_t overall_max(G1CollectedHeap* g1h) {
+    return g1h->g1_reserved_obj_bytes();
+  }
+
+  static size_t eden_space_committed(G1CollectedHeap* g1h);
+  static size_t eden_space_used(G1CollectedHeap* g1h);
+  static size_t eden_space_max(G1CollectedHeap* g1h);
+
+  static size_t survivor_space_committed(G1CollectedHeap* g1h);
+  static size_t survivor_space_used(G1CollectedHeap* g1h);
+  static size_t survivor_space_max(G1CollectedHeap* g1h);
+
+  static size_t old_space_committed(G1CollectedHeap* g1h);
+  static size_t old_space_used(G1CollectedHeap* g1h);
+  static size_t old_space_max(G1CollectedHeap* g1h);
+};
+
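The static methods matter because the subclass constructors below pass values such as eden_space_committed(g1h) to the base-class constructor, i.e. they run before the object is fully constructed, which is only safe for static (or free) functions. A minimal C++ analogue of that pattern:

    #include <cstdio>

    struct Base {
      explicit Base(int init) { printf("init=%d\n", init); }
      static int compute(int x) { return x * 2; }   // safe to call pre-construction
    };

    struct Derived : Base {
      explicit Derived(int x) : Base(compute(x)) {} // ok: compute() needs no 'this'
    };

    int main() { Derived d(21); return 0; }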
+// Memory pool that represents the G1 eden.
+class G1EdenPool : public G1MemoryPoolSuper {
+public:
+  G1EdenPool(G1CollectedHeap* g1h);
+
+  size_t used_in_bytes() {
+    return eden_space_used(_g1h);
+  }
+  size_t max_size() const {
+    return eden_space_max(_g1h);
+  }
+  MemoryUsage get_memory_usage();
+};
+
+// Memory pool that represents the G1 survivor.
+class G1SurvivorPool : public G1MemoryPoolSuper {
+public:
+  G1SurvivorPool(G1CollectedHeap* g1h);
+
+  size_t used_in_bytes() {
+    return survivor_space_used(_g1h);
+  }
+  size_t max_size() const {
+    return survivor_space_max(_g1h);
+  }
+  MemoryUsage get_memory_usage();
+};
+
+// Memory pool that represents the G1 old gen.
+class G1OldGenPool : public G1MemoryPoolSuper {
+public:
+  G1OldGenPool(G1CollectedHeap* g1h);
+
+  size_t used_in_bytes() {
+    return old_space_used(_g1h);
+  }
+  size_t max_size() const {
+    return old_space_max(_g1h);
+  }
+  MemoryUsage get_memory_usage();
+};
--- a/src/share/vm/services/heapDumper.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/services/heapDumper.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1274,7 +1274,7 @@
   if (o->is_klass()) return;
 
   // skip classes as these emitted as HPROF_GC_CLASS_DUMP records
-  if (o->klass() == SystemDictionary::class_klass()) {
+  if (o->klass() == SystemDictionary::Class_klass()) {
     if (!java_lang_Class::is_primitive(o)) {
       return;
     }
--- a/src/share/vm/services/lowMemoryDetector.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/services/lowMemoryDetector.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -32,7 +32,7 @@
 void LowMemoryDetector::initialize() {
   EXCEPTION_MARK;
 
-  instanceKlassHandle klass (THREAD,  SystemDictionary::thread_klass());
+  instanceKlassHandle klass (THREAD,  SystemDictionary::Thread_klass());
   instanceHandle thread_oop = klass->allocate_instance_handle(CHECK);
 
   const char thread_name[] = "Low Memory Detector";
--- a/src/share/vm/services/management.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/services/management.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -491,7 +491,7 @@
   int num_flags = Arguments::num_jvm_flags();
   int num_args = Arguments::num_jvm_args();
 
-  instanceKlassHandle ik (THREAD, SystemDictionary::string_klass());
+  instanceKlassHandle ik (THREAD, SystemDictionary::String_klass());
   objArrayOop r = oopFactory::new_objArray(ik(), num_args + num_flags, CHECK_NULL);
   objArrayHandle result_h(THREAD, r);
 
@@ -790,7 +790,7 @@
   assert(!has_undefined_init_size, "Undefined init size");
   assert(!has_undefined_max_size, "Undefined max size");
 
-  MemoryUsage usage((heap ? Arguments::initial_heap_size() : total_init),
+  MemoryUsage usage((heap ? InitialHeapSize : total_init),
                     total_used,
                     total_committed,
                     (heap ? Universe::heap()->max_capacity() : total_max));
@@ -1321,7 +1321,7 @@
   LoadedClassesEnumerator lce(THREAD);  // Pass current Thread as parameter
 
   int num_classes = lce.num_loaded_classes();
-  objArrayOop r = oopFactory::new_objArray(SystemDictionary::class_klass(), num_classes, CHECK_0);
+  objArrayOop r = oopFactory::new_objArray(SystemDictionary::Class_klass(), num_classes, CHECK_0);
   objArrayHandle classes_ah(THREAD, r);
 
   for (int i = 0; i < num_classes; i++) {
@@ -1481,7 +1481,7 @@
   // last flag entry is always NULL, so subtract 1
   int nFlags = (int) Flag::numFlags - 1;
   // allocate a temp array
-  objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+  objArrayOop r = oopFactory::new_objArray(SystemDictionary::String_klass(),
                                            nFlags, CHECK_0);
   objArrayHandle flags_ah(THREAD, r);
   int num_entries = 0;
@@ -1497,7 +1497,7 @@
 
   if (num_entries < nFlags) {
     // Return array of right length
-    objArrayOop res = oopFactory::new_objArray(SystemDictionary::string_klass(), num_entries, CHECK_0);
+    objArrayOop res = oopFactory::new_objArray(SystemDictionary::String_klass(), num_entries, CHECK_0);
     for(int i = 0; i < num_entries; i++) {
       res->obj_at_put(i, flags_ah->obj_at(i));
     }
@@ -1507,16 +1507,17 @@
   return (jobjectArray)JNIHandles::make_local(env, flags_ah());
 JVM_END
 
-// utility function used by jmm_GetVMGlobals
-void add_global_entry(JNIEnv* env, Handle name, jmmVMGlobal *global, Flag *flag, TRAPS) {
+// Utility function used by jmm_GetVMGlobals.  Returns false if flag type
+// can't be determined, true otherwise.  If false is returned, then *global
+// will be incomplete and invalid.
+bool add_global_entry(JNIEnv* env, Handle name, jmmVMGlobal *global, Flag *flag, TRAPS) {
   Handle flag_name;
   if (name() == NULL) {
-    flag_name = java_lang_String::create_from_str(flag->name, CHECK);
+    flag_name = java_lang_String::create_from_str(flag->name, CHECK_false);
   } else {
     flag_name = name;
   }
   global->name = (jstring)JNIHandles::make_local(env, flag_name());
-  global->type = JMM_VMGLOBAL_TYPE_UNKNOWN;
 
   if (flag->is_bool()) {
     global->value.z = flag->get_bool() ? JNI_TRUE : JNI_FALSE;
@@ -1527,10 +1528,17 @@
   } else if (flag->is_uintx()) {
     global->value.j = (jlong)flag->get_uintx();
     global->type = JMM_VMGLOBAL_TYPE_JLONG;
+  } else if (flag->is_uint64_t()) {
+    global->value.j = (jlong)flag->get_uint64_t();
+    global->type = JMM_VMGLOBAL_TYPE_JLONG;
   } else if (flag->is_ccstr()) {
-    Handle str = java_lang_String::create_from_str(flag->get_ccstr(), CHECK);
+    Handle str = java_lang_String::create_from_str(flag->get_ccstr(), CHECK_false);
     global->value.l = (jobject)JNIHandles::make_local(env, str());
     global->type = JMM_VMGLOBAL_TYPE_JSTRING;
+  } else {
+    global->type = JMM_VMGLOBAL_TYPE_UNKNOWN;
+    assert(false, "Unsupported VMGlobal Type");
+    return false;
   }
 
   global->writeable = flag->is_writeable();
@@ -1557,6 +1565,8 @@
     default:
       global->origin = JMM_VMGLOBAL_ORIGIN_OTHER;
   }
+
+  return true;
 }
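add_global_entry() now reports failure instead of leaving *global half-filled, and both call sites below only count an entry after a successful fill. A generic standalone sketch of that out-parameter contract (all names are stand-ins):

    #include <cstdio>

    struct Entry { int value; };

    static bool fill_entry(int input, Entry* out) {
      if (input < 0) return false;  // can't determine: *out stays invalid
      out->value = input * 2;
      return true;
    }

    int main() {
      Entry entries[4];
      const int inputs[] = {1, -1, 2, 3};
      int num_entries = 0;
      for (int in : inputs) {
        if (fill_entry(in, &entries[num_entries])) {  // count only on success
          num_entries++;
        }
      }
      printf("filled %d of 4\n", num_entries);
      return 0;
    }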
 
 // Fill globals array of count length with jmmVMGlobal entries
@@ -1583,7 +1593,7 @@
     objArrayHandle names_ah(THREAD, ta);
     // Make sure we have a String array
     klassOop element_klass = objArrayKlass::cast(names_ah->klass())->element_klass();
-    if (element_klass != SystemDictionary::string_klass()) {
+    if (element_klass != SystemDictionary::String_klass()) {
       THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
                  "Array element type is not String class", 0);
     }
@@ -1599,8 +1609,8 @@
       Handle sh(THREAD, s);
       char* str = java_lang_String::as_utf8_string(s);
       Flag* flag = Flag::find_flag(str, strlen(str));
-      if (flag != NULL) {
-        add_global_entry(env, sh, &globals[i], flag, THREAD);
+      if (flag != NULL &&
+          add_global_entry(env, sh, &globals[i], flag, THREAD)) {
         num_entries++;
       } else {
         globals[i].name = NULL;
@@ -1617,8 +1627,8 @@
     for (int i = 0; i < nFlags && num_entries < count;  i++) {
       Flag* flag = &Flag::flags[i];
       // Exclude the locked (diagnostic, experimental) flags
-      if (flag->is_unlocked() || flag->is_unlocker()) {
-        add_global_entry(env, null_h, &globals[num_entries], flag, THREAD);
+      if ((flag->is_unlocked() || flag->is_unlocker()) &&
+          add_global_entry(env, null_h, &globals[num_entries], flag, THREAD)) {
         num_entries++;
       }
     }
@@ -1650,11 +1660,14 @@
     bool bvalue = (new_value.z == JNI_TRUE ? true : false);
     succeed = CommandLineFlags::boolAtPut(name, &bvalue, MANAGEMENT);
   } else if (flag->is_intx()) {
-    intx ivalue = new_value.j;
+    intx ivalue = (intx)new_value.j;
     succeed = CommandLineFlags::intxAtPut(name, &ivalue, MANAGEMENT);
   } else if (flag->is_uintx()) {
-    uintx uvalue = new_value.j;
+    uintx uvalue = (uintx)new_value.j;
     succeed = CommandLineFlags::uintxAtPut(name, &uvalue, MANAGEMENT);
+  } else if (flag->is_uint64_t()) {
+    uint64_t uvalue = (uint64_t)new_value.j;
+    succeed = CommandLineFlags::uint64_tAtPut(name, &uvalue, MANAGEMENT);
   } else if (flag->is_ccstr()) {
     oop str = JNIHandles::resolve_external_guard(new_value.l);
     if (str == NULL) {
@@ -1734,7 +1747,7 @@
 
   // Make sure we have a String array
   klassOop element_klass = objArrayKlass::cast(names_ah->klass())->element_klass();
-  if (element_klass != SystemDictionary::string_klass()) {
+  if (element_klass != SystemDictionary::String_klass()) {
     THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
                "Array element type is not String class", 0);
   }
@@ -1769,7 +1782,7 @@
     num_threads += cycle->num_threads();
   }
 
-  objArrayOop r = oopFactory::new_objArray(SystemDictionary::thread_klass(), num_threads, CHECK_NH);
+  objArrayOop r = oopFactory::new_objArray(SystemDictionary::Thread_klass(), num_threads, CHECK_NH);
   objArrayHandle threads_ah(THREAD, r);
 
   int index = 0;
--- a/src/share/vm/services/memoryManager.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/services/memoryManager.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -72,6 +72,14 @@
   return (GCMemoryManager*) new PSMarkSweepMemoryManager();
 }
 
+GCMemoryManager* MemoryManager::get_g1YoungGen_memory_manager() {
+  return (GCMemoryManager*) new G1YoungGenMemoryManager();
+}
+
+GCMemoryManager* MemoryManager::get_g1OldGen_memory_manager() {
+  return (GCMemoryManager*) new G1OldGenMemoryManager();
+}
+
 instanceOop MemoryManager::get_memory_manager_instance(TRAPS) {
   // Must do an acquire so as to force ordering of subsequent
   // loads from anything _memory_mgr_obj points to or implies.
--- a/src/share/vm/services/memoryManager.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/services/memoryManager.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -54,7 +54,9 @@
     ParNew,
     ConcurrentMarkSweep,
     PSScavenge,
-    PSMarkSweep
+    PSMarkSweep,
+    G1YoungGen,
+    G1OldGen
   };
 
   MemoryManager();
@@ -85,6 +87,8 @@
   static GCMemoryManager* get_cms_memory_manager();
   static GCMemoryManager* get_psScavenge_memory_manager();
   static GCMemoryManager* get_psMarkSweep_memory_manager();
+  static GCMemoryManager* get_g1YoungGen_memory_manager();
+  static GCMemoryManager* get_g1OldGen_memory_manager();
 
 };
 
@@ -231,3 +235,21 @@
   MemoryManager::Name kind() { return MemoryManager::PSMarkSweep; }
   const char* name()         { return "PS MarkSweep"; }
 };
+
+class G1YoungGenMemoryManager : public GCMemoryManager {
+private:
+public:
+  G1YoungGenMemoryManager() : GCMemoryManager() {}
+
+  MemoryManager::Name kind() { return MemoryManager::G1YoungGen; }
+  const char* name()         { return "G1 Young Generation"; }
+};
+
+class G1OldGenMemoryManager : public GCMemoryManager {
+private:
+public:
+  G1OldGenMemoryManager() : GCMemoryManager() {}
+
+  MemoryManager::Name kind() { return MemoryManager::G1OldGen; }
+  const char* name()         { return "G1 Old Generation"; }
+};
--- a/src/share/vm/services/memoryService.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/services/memoryService.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -60,8 +60,8 @@
       break;
     }
     case CollectedHeap::G1CollectedHeap : {
-      G1CollectedHeap::g1_unimplemented();
-      return;
+      add_g1_heap_info(G1CollectedHeap::heap());
+      break;
     }
 #endif // SERIALGC
     default: {
@@ -164,6 +164,19 @@
   add_psOld_memory_pool(heap->old_gen(), _major_gc_manager);
   add_psPerm_memory_pool(heap->perm_gen(), _major_gc_manager);
 }
+
+void MemoryService::add_g1_heap_info(G1CollectedHeap* g1h) {
+  assert(UseG1GC, "sanity");
+
+  _minor_gc_manager = MemoryManager::get_g1YoungGen_memory_manager();
+  _major_gc_manager = MemoryManager::get_g1OldGen_memory_manager();
+  _managers_list->append(_minor_gc_manager);
+  _managers_list->append(_major_gc_manager);
+
+  add_g1YoungGen_memory_pool(g1h, _major_gc_manager, _minor_gc_manager);
+  add_g1OldGen_memory_pool(g1h, _major_gc_manager);
+  add_g1PermGen_memory_pool(g1h, _major_gc_manager);
+}
 #endif // SERIALGC
 
 MemoryPool* MemoryService::add_gen(Generation* gen,
@@ -384,6 +397,64 @@
   mgr->add_pool(perm_gen);
   _pools_list->append(perm_gen);
 }
+
+void MemoryService::add_g1YoungGen_memory_pool(G1CollectedHeap* g1h,
+                                               MemoryManager* major_mgr,
+                                               MemoryManager* minor_mgr) {
+  assert(major_mgr != NULL && minor_mgr != NULL, "should have two managers");
+
+  G1EdenPool* eden = new G1EdenPool(g1h);
+  G1SurvivorPool* survivor = new G1SurvivorPool(g1h);
+
+  major_mgr->add_pool(eden);
+  major_mgr->add_pool(survivor);
+  minor_mgr->add_pool(eden);
+  minor_mgr->add_pool(survivor);
+  _pools_list->append(eden);
+  _pools_list->append(survivor);
+}
+
+void MemoryService::add_g1OldGen_memory_pool(G1CollectedHeap* g1h,
+                                             MemoryManager* mgr) {
+  assert(mgr != NULL, "should have one manager");
+
+  G1OldGenPool* old_gen = new G1OldGenPool(g1h);
+  mgr->add_pool(old_gen);
+  _pools_list->append(old_gen);
+}
+
+void MemoryService::add_g1PermGen_memory_pool(G1CollectedHeap* g1h,
+                                              MemoryManager* mgr) {
+  assert(mgr != NULL, "should have one manager");
+
+  CompactingPermGenGen* perm_gen = (CompactingPermGenGen*) g1h->perm_gen();
+  PermanentGenerationSpec* spec = perm_gen->spec();
+  size_t max_size = spec->max_size() - spec->read_only_size()
+                                     - spec->read_write_size();
+  MemoryPool* pool = add_space(perm_gen->unshared_space(),
+                               "G1 Perm Gen",
+                               false, /* is_heap */
+                               max_size,
+                               true   /* support_usage_threshold */);
+  mgr->add_pool(pool);
+
+  // in case we support CDS in G1
+  if (UseSharedSpaces) {
+    pool = add_space(perm_gen->ro_space(),
+                     "G1 Perm Gen [shared-ro]",
+                     false, /* is_heap */
+                     spec->read_only_size(),
+                     true   /* support_usage_threshold */);
+    mgr->add_pool(pool);
+
+    pool = add_space(perm_gen->rw_space(),
+                     "G1 Perm Gen [shared-rw]",
+                     false, /* is_heap */
+                     spec->read_write_size(),
+                     true   /* support_usage_threshold */);
+    mgr->add_pool(pool);
+  }
+}
 #endif // SERIALGC
 
 void MemoryService::add_code_heap_memory_pool(CodeHeap* heap) {
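Note the wiring above: the eden and survivor pools are registered with both the minor and major managers, while the old-gen pool belongs only to the major manager. A toy sketch of that many-to-many registration, with stand-in types rather than HotSpot's:

    #include <cstdio>
    #include <string>
    #include <vector>

    struct Pool { std::string name; };

    struct Manager {
      std::string name;
      std::vector<Pool*> pools;
      void add_pool(Pool* p) { pools.push_back(p); }
    };

    int main() {
      Manager minor_mgr{"G1 Young Generation"}, major_mgr{"G1 Old Generation"};
      Pool eden{"G1 Eden"}, survivor{"G1 Survivor"}, old_gen{"G1 Old Gen"};

      for (Manager* m : {&minor_mgr, &major_mgr}) {  // both see the young pools
        m->add_pool(&eden);
        m->add_pool(&survivor);
      }
      major_mgr.add_pool(&old_gen);                  // only the major manager

      for (Manager* m : {&minor_mgr, &major_mgr}) {
        printf("%s:", m->name.c_str());
        for (Pool* p : m->pools) printf(" [%s]", p->name.c_str());
        printf("\n");
      }
      return 0;
    }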
--- a/src/share/vm/services/memoryService.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/services/memoryService.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -40,6 +40,7 @@
 class ParallelScavengeHeap;
 class CompactingPermGenGen;
 class CMSPermGenGen;
+class G1CollectedHeap;
 
 // VM Monitoring and Management Support
 
@@ -88,6 +89,13 @@
   static void add_psPerm_memory_pool(PSPermGen* perm,
                                      MemoryManager* mgr);
 
+  static void add_g1YoungGen_memory_pool(G1CollectedHeap* g1h,
+                                         MemoryManager* major_mgr,
+                                         MemoryManager* minor_mgr);
+  static void add_g1OldGen_memory_pool(G1CollectedHeap* g1h,
+                                       MemoryManager* mgr);
+  static void add_g1PermGen_memory_pool(G1CollectedHeap* g1h,
+                                        MemoryManager* mgr);
 
   static MemoryPool* add_space(ContiguousSpace* space,
                                const char* name,
@@ -111,6 +119,7 @@
 
   static void add_gen_collected_heap_info(GenCollectedHeap* heap);
   static void add_parallel_scavenge_heap_info(ParallelScavengeHeap* heap);
+  static void add_g1_heap_info(G1CollectedHeap* g1h);
 
 public:
   static void set_universe_heap(CollectedHeap* heap);
--- a/src/share/vm/services/runtimeService.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/services/runtimeService.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -104,6 +104,13 @@
 
 void RuntimeService::record_safepoint_begin() {
   HS_DTRACE_PROBE(hs_private, safepoint__begin);
+
+  // Print the time interval in which the app was executing
+  if (PrintGCApplicationConcurrentTime) {
+    gclog_or_tty->print_cr("Application time: %3.7f seconds",
+                                last_application_time_sec());
+  }
+
   // update the time stamp to begin recording safepoint time
   _safepoint_timer.update();
   if (UsePerfData) {
@@ -122,6 +129,15 @@
 
 void RuntimeService::record_safepoint_end() {
   HS_DTRACE_PROBE(hs_private, safepoint__end);
+
+  // Print the time interval for which the app was stopped
+  // during the current safepoint operation.
+  if (PrintGCApplicationStoppedTime) {
+    gclog_or_tty->print_cr("Total time for which application threads "
+                           "were stopped: %3.7f seconds",
+                           last_safepoint_time_sec());
+  }
+
   // update the time stamp to begin recording app time
   _app_timer.update();
   if (UsePerfData) {
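The two prints above bracket the flip between the application timer and the safepoint timer. A standalone sketch of that pattern using std::chrono (HotSpot uses its own timer types):

    #include <chrono>
    #include <cstdio>

    using Clock = std::chrono::steady_clock;

    int main() {
      auto app_start = Clock::now();
      // ... application threads run ...
      auto sp_begin = Clock::now();       // record_safepoint_begin() analogue
      printf("Application time: %3.7f seconds\n",
             std::chrono::duration<double>(sp_begin - app_start).count());
      // ... safepoint operation runs ...
      auto sp_end = Clock::now();         // record_safepoint_end() analogue
      printf("Total time for which application threads were stopped: "
             "%3.7f seconds\n",
             std::chrono::duration<double>(sp_end - sp_begin).count());
      return 0;
    }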
--- a/src/share/vm/services/serviceUtil.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/services/serviceUtil.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -45,7 +45,7 @@
     // instance
     if (o->is_instance()) {
       // instance objects are visible
-      if (o->klass() != SystemDictionary::class_klass()) {
+      if (o->klass() != SystemDictionary::Class_klass()) {
         return true;
       }
       if (java_lang_Class::is_primitive(o)) {
--- a/src/share/vm/services/threadService.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/services/threadService.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -540,7 +540,7 @@
 }
 
 Handle ThreadStackTrace::allocate_fill_stack_trace_element_array(TRAPS) {
-  klassOop k = SystemDictionary::stackTraceElement_klass();
+  klassOop k = SystemDictionary::StackTraceElement_klass();
   assert(k != NULL, "must be loaded in 1.4+");
   instanceKlassHandle ik(THREAD, k);
 
--- a/src/share/vm/utilities/constantTag.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/utilities/constantTag.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -36,7 +36,8 @@
   JVM_CONSTANT_UnresolvedString         = 102,  // Temporary tag until actual use
   JVM_CONSTANT_StringIndex              = 103,  // Temporary tag while constructing constant pool
   JVM_CONSTANT_UnresolvedClassInError   = 104,  // Error tag due to resolution error
-  JVM_CONSTANT_InternalMax              = 104   // Last implementation tag
+  JVM_CONSTANT_Object                   = 105,  // Required for BoundMethodHandle arguments.
+  JVM_CONSTANT_InternalMax              = 105   // Last implementation tag
 };
 
 
@@ -70,6 +71,8 @@
   bool is_unresolved_string() const { return _tag == JVM_CONSTANT_UnresolvedString; }
   bool is_string_index() const      { return _tag == JVM_CONSTANT_StringIndex; }
 
+  bool is_object() const            { return _tag == JVM_CONSTANT_Object; }
+
   bool is_klass_reference() const   { return is_klass_index() || is_unresolved_klass(); }
   bool is_klass_or_reference() const{ return is_klass() || is_klass_reference(); }
   bool is_field_or_method() const   { return is_field() || is_method() || is_interface_method(); }
--- a/src/share/vm/utilities/exceptions.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/utilities/exceptions.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -122,7 +122,7 @@
   // Check for special boot-strapping/vm-thread handling
   if (special_exception(thread, file, line, h_exception)) return;
 
-  assert(h_exception->is_a(SystemDictionary::throwable_klass()), "exception is not a subclass of java/lang/Throwable");
+  assert(h_exception->is_a(SystemDictionary::Throwable_klass()), "exception is not a subclass of java/lang/Throwable");
 
   // set the pending exception
   thread->set_pending_exception(h_exception(), file, line);
@@ -255,7 +255,7 @@
 
     // Future: object initializer should take a cause argument
     if (h_cause() != NULL) {
-      assert(h_cause->is_a(SystemDictionary::throwable_klass()),
+      assert(h_cause->is_a(SystemDictionary::Throwable_klass()),
           "exception cause is not a subclass of java/lang/Throwable");
       JavaValue result1(T_OBJECT);
       JavaCallArguments args1;
--- a/src/share/vm/utilities/globalDefinitions.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/utilities/globalDefinitions.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -139,6 +139,10 @@
 const size_t G                  = M*K;
 const size_t HWperKB            = K / sizeof(HeapWord);
 
+const size_t LOG_K              = 10;
+const size_t LOG_M              = 2 * LOG_K;
+const size_t LOG_G              = LOG_M + LOG_K;
+
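A quick standalone check of the relationships above (K = 2^10, M = 2^20, G = 2^30):

    #include <cstddef>

    int main() {
      constexpr size_t K = 1024, M = K * K, G = M * K;
      constexpr size_t LOG_K = 10, LOG_M = 2 * LOG_K, LOG_G = LOG_M + LOG_K;
      static_assert(K == size_t(1) << LOG_K, "K is 2^LOG_K");
      static_assert(M == size_t(1) << LOG_M, "M is 2^LOG_M");
      static_assert(G == size_t(1) << LOG_G, "G is 2^LOG_G");
      return 0;
    }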
 const jint min_jint = (jint)1 << (sizeof(jint)*BitsPerByte-1); // 0x80000000 == smallest jint
 const jint max_jint = (juint)min_jint - 1;                     // 0x7FFFFFFF == largest jint
 
--- a/src/share/vm/utilities/growableArray.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/utilities/growableArray.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -278,6 +278,17 @@
     _len--;
   }
 
+  // inserts the given element before the element at index idx
+  void insert_before(const int idx, const E& elem) {
+    check_nesting();
+    if (_len == _max) grow(_len);
+    for (int j = _len - 1; j >= idx; j--) {
+      _data[j + 1] = _data[j];
+    }
+    _len++;
+    _data[idx] = elem;
+  }
+
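A standalone sketch of the shift-right insertion that insert_before() performs, using std::vector as a stand-in for the growable array's backing store:

    #include <cstdio>
    #include <vector>

    static void insert_before(std::vector<int>& v, int idx, int elem) {
      v.push_back(0);                          // grow by one slot
      for (int j = (int)v.size() - 2; j >= idx; j--) {
        v[j + 1] = v[j];                       // shift the tail right
      }
      v[idx] = elem;
    }

    int main() {
      std::vector<int> v = {1, 2, 4, 5};
      insert_before(v, 2, 3);                  // -> 1 2 3 4 5
      for (int x : v) printf("%d ", x);
      printf("\n");
      return 0;
    }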
   void appendAll(const GrowableArray<E>* l) {
     for (int i = 0; i < l->_len; i++) {
       raw_at_put_grow(_len, l->_data[i], 0);
--- a/src/share/vm/utilities/numberSeq.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/utilities/numberSeq.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -241,3 +241,33 @@
 
   return b0 + b1 * num;
 }
+
+
+// Printing/Debugging Support
+
+void AbsSeq::dump() { dump_on(gclog_or_tty); }
+
+void AbsSeq::dump_on(outputStream* s) {
+  s->print_cr("\t _num = %d, _sum = %7.3f, _sum_of_squares = %7.3f",
+                  _num,      _sum,         _sum_of_squares);
+  s->print_cr("\t _davg = %7.3f, _dvariance = %7.3f, _alpha = %7.3f",
+                  _davg,         _dvariance,         _alpha);
+}
+
+void NumberSeq::dump_on(outputStream* s) {
+  AbsSeq::dump_on(s);
+  s->print_cr("\t\t _last = %7.3f, _maximum = %7.3f");
+}
+
+void TruncatedSeq::dump_on(outputStream* s) {
+  AbsSeq::dump_on(s);
+  s->print_cr("\t\t _length = %d, _next = %d", _length, _next);
+  for (int i = 0; i < _length; i++) {
+    if (i%5 == 0) {
+      s->cr();
+      s->print("\t");
+    }
+    s->print("\t[%d]=%7.3f", i, _sequence[i]);
+  }
+  s->print_cr("");
+}
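A small standalone sketch of the five-entries-per-line layout that TruncatedSeq::dump_on() produces, on a plain array:

    #include <cstdio>

    int main() {
      double sequence[12];
      for (int i = 0; i < 12; i++) sequence[i] = i * 0.5;
      for (int i = 0; i < 12; i++) {
        if (i % 5 == 0) printf("\n\t");        // start a new row every 5 entries
        printf("\t[%d]=%7.3f", i, sequence[i]);
      }
      printf("\n");
      return 0;
    }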
--- a/src/share/vm/utilities/numberSeq.hpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/utilities/numberSeq.hpp	Wed Mar 24 17:16:33 2010 -0700
@@ -74,6 +74,10 @@
   double davg() const; // decaying average
   double dvariance() const; // decaying variance
   double dsd() const; // decaying "standard deviation"
+
+  // Debugging/Printing
+  virtual void dump();
+  virtual void dump_on(outputStream* s);
 };
 
 class NumberSeq: public AbsSeq {
@@ -91,6 +95,9 @@
   virtual void add(double val);
   virtual double maximum() const { return _maximum; }
   virtual double last() const { return _last; }
+
+  // Debugging/Printing
+  virtual void dump_on(outputStream* s);
 };
 
 class TruncatedSeq: public AbsSeq {
@@ -114,4 +121,7 @@
 
   double oldest() const; // the oldest valid value in the sequence
   double predict_next() const; // prediction based on linear regression
+
+  // Debugging/Printing
+  virtual void dump_on(outputStream* s);
 };
--- a/src/share/vm/utilities/vmError.cpp	Mon Mar 15 15:51:36 2010 -0400
+++ b/src/share/vm/utilities/vmError.cpp	Wed Mar 24 17:16:33 2010 -0700
@@ -502,6 +502,23 @@
 #endif // ZERO
      }
 
+  STEP(135, "(printing target Java thread stack)" )
+
+     // printing Java thread stack trace if it is involved in GC crash
+     if (_verbose && (_thread->is_Named_thread())) {
+       JavaThread*  jt = ((NamedThread *)_thread)->processed_thread();
+       if (jt != NULL) {
+         st->print_cr("JavaThread " PTR_FORMAT " (nid = " UINTX_FORMAT ") was being processed", jt, jt->osthread()->thread_id());
+         if (jt->has_last_Java_frame()) {
+           st->print_cr("Java frames: (J=compiled Java code, j=interpreted, Vv=VM code)");
+           for(StackFrameStream sfs(jt); !sfs.is_done(); sfs.next()) {
+             sfs.current()->print_on_error(st, buf, sizeof(buf), true);
+             st->cr();
+           }
+         }
+       }
+     }
+
   STEP(140, "(printing VM operation)" )
 
      if (_verbose && _thread && _thread->is_VM_thread()) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6769124/TestArrayCopy6769124.java	Wed Mar 24 17:16:33 2010 -0700
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6769124
+ * @summary arraycopy may crash the VM with c1 on 64 bit
+ */
+
+public class TestArrayCopy6769124 {
+
+    public static void main(String[] args) {
+
+        int k = 1 << 31;
+
+
+        for(int j = 0; j <1000000; j++) {
+            int i = -1;
+            while(i < 10) {
+                i++;
+            }
+
+            int m = k * i;
+
+            int[] O1 = new int[20];
+            int[] O2 = new int[20];
+
+            System.arraycopy(O1, i, O2, i, 1); //will crash on amd64
+            System.arraycopy(O1, m, O2, m, 1); //will crash on sparcv9
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6769124/TestDeoptInt6769124.java	Wed Mar 24 17:16:33 2010 -0700
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6769124
+ * @summary int value might not be correctly decoded on deopt with c1 on 64 bit
+ *
+ * @run main/othervm -Xcomp -XX:CompileOnly=TestDeoptInt6769124.m TestDeoptInt6769124
+ */
+
+public class TestDeoptInt6769124 {
+
+    static class A {
+        volatile int vl;
+        A(int v) {
+            vl = v;
+        }
+    }
+
+    static void m(int b) {
+        A a = new A(10);
+        int c;
+        c = b + a.vl; //accessing volatile field of class not loaded at compile time forces a deopt
+        if(c != 20) {
+            System.out.println("a (= " + a.vl + ") + b (= " + b + ") = c (= " + c + ") != 20");
+            throw new InternalError();
+        }
+    }
+
+    public static void main(String[] args) {
+        m(10);
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6769124/TestUnalignedLoad6769124.java	Wed Mar 24 17:16:33 2010 -0700
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6769124
+ * @summary unaligned load may fail with c1 on 64 bit
+ */
+
+public class TestUnalignedLoad6769124 {
+
+    static long l1v = 0x200000003L;
+    static long l2v = 0x400000005L;
+    static double d1v = Double.MAX_VALUE;
+    static double d2v = Double.MIN_VALUE;
+
+    public static void main(String[] args) {
+        long l1 = l1v;
+        double d1 = d1v;
+        long l2 = l2v;
+        double d2 = d2v;
+
+        // Run long enough to induce an OSR
+        for (int i = 0; i < 10000000; i++) {
+        }
+        boolean error = false;
+
+        if (l1 != l1v) {
+            System.out.println(l1 + " != " + l1v);
+            error = true;
+        }
+        if (l2 != l2v) {
+            System.out.println(l2 + " != " + l2v);
+            error = true;
+        }
+        if (d1 != d1v) {
+            System.out.println(d1 + " != " + d1v);
+            error = true;
+        }
+        if (d2 != d2v) {
+            System.out.println(d2 + " != " + d2v);
+            error = true;
+        }
+        if (error) {
+            throw new InternalError();
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6792161/Test6792161.java	Wed Mar 24 17:16:33 2010 -0700
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2010 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6792161
+ * @summary assert("No dead instructions after post-alloc")
+ *
+ * @run main/othervm -Xcomp -XX:MaxInlineSize=120 Test6792161
+ */
+
+import java.lang.reflect.Constructor;
+public class Test6792161 {
+    static Constructor test(Class cls) throws Exception {
+        Class[] args= { String.class };
+        try {
+            return cls.getConstructor(args);
+        } catch (NoSuchMethodException e) {}
+        return cls.getConstructor(new Class[0]);
+    }
+    public static void main(final String[] args) throws Exception {
+        try {
+            for (int i = 0; i < 100000; i++) {
+                Constructor ctor = test(Class.forName("Test6792161"));
+            }
+        } catch (NoSuchMethodException e) {}
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6852078/Test6852078.java	Wed Mar 24 17:16:33 2010 -0700
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6852078
+ * @summary Disable SuperWord optimization for unsafe read/write
+ *
+ * @run main/othervm Test6852078
+ */
+
+import java.util.*;
+import java.nio.ByteBuffer;
+import com.sun.corba.se.impl.encoding.ByteBufferWithInfo;
+import com.sun.jndi.toolkit.corba.CorbaUtils;
+
+public class Test6852078 {
+
+    public Test6852078(String [] args) {
+
+        int capacity = 128;
+        ByteBuffer bb = ByteBuffer.allocateDirect(capacity);
+        ByteBufferWithInfo bbwi = new ByteBufferWithInfo( CorbaUtils.getOrb(null, -1, new Hashtable()), bb);
+        byte[] tmpBuf;
+        tmpBuf = new byte[bbwi.buflen];
+
+        for (int i = 0; i < capacity; i++)
+            tmpBuf[i] = bbwi.byteBuffer.get(i);
+    }
+
+    public static void main(String [] args) {
+        for (int i=0; i<2000; i++) {
+            Test6852078 t = new Test6852078(args);
+        }
+    }
+}
--- a/test/compiler/6877254/Test.java	Mon Mar 15 15:51:36 2010 -0400
+++ b/test/compiler/6877254/Test.java	Wed Mar 24 17:16:33 2010 -0700
@@ -26,7 +26,7 @@
  * @bug 6877254
  * @summary Implement StoreCMNode::Ideal to promote its OopStore above the MergeMem
  *
- * @run main/othervm -server -Xcomp -XX:+UseConcMarkSweepGC Test
+ * @run main/othervm -Xcomp Test
  */
 
 public class Test {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6895383/Test.java	Wed Mar 24 17:16:33 2010 -0700
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6895383
+ * @summary JCK test throws NPE for method compiled with Escape Analysis
+ *
+ * @run main/othervm -Xcomp Test
+ */
+
+import java.util.*;
+import java.util.concurrent.*;
+
+public class Test {
+    public static void main(String argv[]) {
+        Test test = new Test();
+        test.testRemove1_IndexOutOfBounds();
+        test.testAddAll1_IndexOutOfBoundsException();
+    }
+
+    public void testRemove1_IndexOutOfBounds() {
+        CopyOnWriteArrayList c = new CopyOnWriteArrayList();
+    }
+
+    public void testAddAll1_IndexOutOfBoundsException() {
+        try {
+            CopyOnWriteArrayList c = new CopyOnWriteArrayList();
+            c.addAll(-1, new LinkedList()); // should throw IndexOutOfBoundsException
+        } catch (IndexOutOfBoundsException e) {
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6896727/Test.java	Wed Mar 24 17:16:33 2010 -0700
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 6896727
+ * @summary nsk/logging/LoggingPermission/LoggingPermission/logperm002 fails with G1, EscapeAnalysis w/o COOPs
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -Xcomp -XX:+DoEscapeAnalysis Test
+ */
+
+public class Test {
+
+    final static String testString = "abracadabra";
+    public static void main(String args[]) {
+        String params[][] = {
+            {"control", testString}
+        };
+        for (int i=0; i<params.length; i++) {
+            try {
+                System.out.println("Params :" + testString + " and " + params[i][0] + ", " + params[i][1]);
+                if (params[i][1] == null) {
+                    System.exit(97);
+                }
+            } catch (Exception e) {}
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6901572/Test.java	Wed Mar 24 17:16:33 2010 -0700
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/**
+ * @test
+ * @bug 6901572
+ * @summary JVM 1.6.16 crash on loops: assert(has_node(i),"")
+ *
+ * @run main/othervm Test
+ */
+
+
+public class Test {
+
+    public static void main(String[] args) {
+        for (int i = 0; i < 2; i++)
+            NestedLoop();
+    }
+
+    public static long NestedLoop() {
+        final int n = 50;
+        long startTime = System.currentTimeMillis();
+        int x = 0;
+        for(int a = 0; a < n; a++)
+            for(int b = 0; b < n; b++)
+                for(int c = 0; c < n; c++)
+                    for(int d = 0; d < n; d++)
+                        for(int e = 0; e < n; e++)
+                            for(int f = 0; f < n; f++)
+                                x++;
+        long stopTime = System.currentTimeMillis();
+
+        return stopTime - startTime;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6909839/Test6909839.java	Wed Mar 24 17:16:33 2010 -0700
@@ -0,0 +1,282 @@
+/*
+ * Copyright 2010 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6909839
+ * @summary missing unsigned compare cases for some cmoves in sparc.ad
+ *
+ * @run main/othervm -XX:+AggressiveOpts -Xbatch Test6909839
+ */
+
+public class Test6909839 {
+    public static void main(String[] args) {
+        testi();
+        testi();
+        testi();
+        testui();
+        testui();
+        testui();
+        testdi();
+        testdi();
+        testdi();
+        testfi();
+        testfi();
+        testfi();
+
+        testl();
+        testl();
+        testl();
+        testul();
+        testul();
+        testul();
+        testdl();
+        testdl();
+        testdl();
+        testfl();
+        testfl();
+        testfl();
+
+        testf();
+        testf();
+        testf();
+        testuf();
+        testuf();
+        testuf();
+        testdf();
+        testdf();
+        testdf();
+        testff();
+        testff();
+        testff();
+
+        testd();
+        testd();
+        testd();
+        testud();
+        testud();
+        testud();
+        testdd();
+        testdd();
+        testdd();
+        testfd();
+        testfd();
+        testfd();
+
+        testp();
+        testp();
+        testp();
+        testup();
+        testup();
+        testup();
+        testdp();
+        testdp();
+        testdp();
+        testfp();
+        testfp();
+        testfp();
+    }
+
+    static void testui() {
+        int total = 0;
+        for (int i = 0 ; i < 10000; i++) {
+            int v = i % 4;
+            total += ((v >= 1 && v < 3) ? 1 : 2);
+        }
+        System.out.println(total);
+    }
+
+    static void testdi() {
+        int total = 0;
+        for (int i = 0 ; i < 10000; i++) {
+            int v = i % 4;
+            total += (v > 1.0) ? 1 : 2;
+        }
+        System.out.println(total);
+    }
+
+    static void testfi() {
+        int total = 0;
+        for (int i = 0 ; i < 10000; i++) {
+            int v = i % 4;
+            total += (v > 1.0f) ? 1 : 2;
+        }
+        System.out.println(total);
+    }
+
+    static void testi() {
+        int total = 0;
+        for (int i = 0 ; i < 10000; i++) {
+            total += (i % 4 != 0) ? 1 : 2;
+        }
+        System.out.println(total);
+    }
+
+    static void testul() {
+        long total = 0;
+        for (int i = 0 ; i < 10000; i++) {
+            int v = i % 4;
+            total += ((v >= 1 && v < 3) ? 1L : 2L);
+        }
+        System.out.println(total);
+    }
+
+    static void testdl() {
+        long total = 0;
+        for (int i = 0 ; i < 10000; i++) {
+            int v = i % 4;
+            total += (v > 1.0) ? 1L : 2L;
+        }
+        System.out.println(total);
+    }
+
+    static void testfl() {
+        long total = 0;
+        for (int i = 0 ; i < 10000; i++) {
+            int v = i % 4;
+            total += (v > 1.0f) ? 1L : 2L;
+        }
+        System.out.println(total);
+    }
+
+    static void testl() {
+        long total = 0;
+        for (int i = 0 ; i < 10000; i++) {
+            total += (i % 4 != 0) ? 1L : 2L;
+        }
+        System.out.println(total);
+    }
+
+    static void testuf() {
+        float total = 0;
+        for (int i = 0 ; i < 10000; i++) {
+            int v = i % 4;
+            total += ((v >= 1 && v < 3) ? 1.0f : 2.0f);
+        }
+        System.out.println(total);
+    }
+
+    static void testdf() {
+        float total = 0;
+        for (int i = 0 ; i < 10000; i++) {
+            int v = i % 4;
+            total += (v > 0.0) ? 1.0f : 2.0f;
+        }
+        System.out.println(total);
+    }
+
+    static void testff() {
+        float total = 0;
+        for (int i = 0 ; i < 10000; i++) {
+            int v = i % 4;
+            total += (v > 0.0f) ? 1.0f : 2.0f;
+        }
+        System.out.println(total);
+    }
+
+    static void testf() {
+        float total = 0;
+        for (int i = 0 ; i < 10000; i++) {
+            total += (i % 4 != 0) ? 1.0f : 2.0f;
+        }
+        System.out.println(total);
+    }
+
+    static void testud() {
+        double total = 0;
+        for (int i = 0 ; i < 10000; i++) {
+            int v = i % 4;
+            total += ((v >= 1 && v < 3) ? 1.0d : 2.0d);
+        }
+        System.out.println(total);
+    }
+
+    static void testdd() {
+        double total = 0;
+        for (int i = 0 ; i < 10000; i++) {
+            int v = i % 4;
+            total += (v > 1.0) ? 1.0d : 2.0d;
+        }
+        System.out.println(total);
+    }
+
+    static void testfd() {
+        double total = 0;
+        for (int i = 0 ; i < 10000; i++) {
+            int v = i % 4;
+            total += (v > 1.0f) ? 1.0d : 2.0d;
+        }
+        System.out.println(total);
+    }
+
+    static void testd() {
+        double total = 0;
+        for (int i = 0 ; i < 10000; i++) {
+            total += (i % 4 != 0) ? 1.0d : 2.0d;
+        }
+        System.out.println(total);
+    }
+
+    static void testp() {
+        Object a = new Object();
+        Object b = new Object();
+        int total = 0;
+        for (int i = 0 ; i < 10000; i++) {
+            total += ((i % 4 != 0) ? a : b).hashCode();
+        }
+        System.out.println(total);
+    }
+
+    static void testup() {
+        Object a = new Object();
+        Object b = new Object();
+        int total = 0;
+        for (int i = 0 ; i < 10000; i++) {
+            int v = i % 4;
+            total += ((v >= 1 && v < 3) ? a : b).hashCode();
+        }
+        System.out.println(total);
+    }
+
+    static void testdp() {
+        Object a = new Object();
+        Object b = new Object();
+        int total = 0;
+        for (int i = 0 ; i < 10000; i++) {
+            int v = i % 4;
+            total += ((v > 1.0) ? a : b).hashCode();
+        }
+        System.out.println(total);
+    }
+
+    static void testfp() {
+        Object a = new Object();
+        Object b = new Object();
+        int total = 0;
+        for (int i = 0 ; i < 10000; i++) {
+            int v = i % 4;
+            total += ((v > 1.0f) ? a : b).hashCode();
+        }
+        System.out.println(total);
+    }
+}
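The (v >= 1 && v < 3) pattern in the testu* methods above is the two-sided range check that C2 folds into a single unsigned comparison, which is the cmove input that was missing unsigned cases in sparc.ad. A minimal sketch of that equivalence, assuming the standard canonicalization (class and method names here are hypothetical, not part of the test):

    public class UnsignedRangeSketch {
        // v >= lo && v < hi  is equivalent to  unsigned(v - lo) < (hi - lo)
        static boolean signedForm(int v)   { return v >= 1 && v < 3; }
        static boolean unsignedForm(int v) { return ((v - 1) & 0xFFFFFFFFL) < 2L; }

        public static void main(String[] args) {
            for (int v = -5; v <= 5; v++)
                if (signedForm(v) != unsignedForm(v))
                    throw new AssertionError("mismatch at v = " + v);
            System.out.println("signed range check agrees with unsigned compare");
        }
    }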
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6910484/Test.java	Wed Mar 24 17:16:33 2010 -0700
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2009 SAP.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/**
+ * @test
+ * @bug 6910484
+ * @summary incorrect integer optimization (losing an and-op in a given example)
+ *
+ * @run main/othervm -Xbatch Test
+ */
+
+public class Test {
+
+    public static void main(String[] args) {
+        long iteration = 0;
+        for (int i = 0; i < 11000; i++) {
+            iteration++;
+            int result = test(255);
+            if (result != 112) {
+                System.out.println("expected 112, but got " + result + " after iteration " + iteration);
+                System.exit(97);
+            }
+        }
+    }
+
+    private static int test(int x) {
+        return (x & -32) / 2;
+    }
+
+}
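The expected value 112 follows from doing the AND before the division: -32 is 0xFFFFFFE0, so x & -32 clears the low five bits. A worked sketch of the arithmetic the test pins down (class name hypothetical, illustrative only):

    public class AndDivSketch {
        public static void main(String[] args) {
            int x = 255;           // 0x000000FF
            int masked = x & -32;  // clears the low 5 bits: 224
            System.out.println(masked / 2);  // 112, the value the test requires
            System.out.println(x / 2);       // 127: the answer if the AND were dropped
        }
    }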
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6910605/Test.java	Wed Mar 24 17:16:33 2010 -0700
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2010 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6910605
+ * @summary C2: NullPointerException/ClassCastException is thrown when C2 is used with DeoptimizeALot
+ *
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+DeoptimizeALot -Xbatch Test
+ *
+ * original test: nsk/coverage/runtime/runtime007
+ */
+
+import java.io.*;
+
+public class Test {
+        public static int buf=0;
+
+        public static void main( String argv[] ) {
+                System.exit(run(argv, System.out)+95);
+        }
+
+        public static int run(String argv[],PrintStream out) {
+                int ret=0, retx=0, bad=0;
+
+                for( int i=0; (i < 100000) && (bad < 10) ; i++ ) {
+                        retx = OptoRuntime_f2i_Type(out);
+                        ret += retx;
+                        if( retx !=0 ) {
+                                out.println("i="+i);
+                                bad++;
+                        }
+                }
+                return ret==0 ? 0 : 2 ;
+        }
+
+        public static int OptoRuntime_f2i_Type(PrintStream out) {
+                int c1=2, c2=3, c3=4, c4=5, c5=6;
+                int j=0, k=0;
+                try {
+                        int[][] iii=(int[][])(new int[c1][c2]);
+
+                        for( j=0; j<c1; j++ ) {
+                                for( k=0; k<c2; k++ ) {
+                                        iii[j][k]=(int)((float)(j+1)/(float)(k+1));
+                                }
+                        }
+                } catch (Throwable e) {
+                        out.println("Unexpected exception " + e);
+                        e.printStackTrace(out);
+                        out.println("j="+j+", k="+k);
+                        return 1;
+                }
+                return 0;
+        }
+
+}
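The loop body repeatedly exercises float-to-int conversion (hence the method name OptoRuntime_f2i_Type) while DeoptimizeALot forces frequent deoptimization. For reference, a small sketch of the (int) cast semantics that conversion must preserve (class name hypothetical, illustrative only):

    public class F2ISketch {
        public static void main(String[] args) {
            System.out.println((int) 1.9f);             // 1: truncates toward zero
            System.out.println((int) -1.9f);            // -1
            System.out.println((int) Float.MAX_VALUE);  // Integer.MAX_VALUE: saturates
            System.out.println((int) Float.NaN);        // 0: NaN maps to zero
        }
    }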
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6910618/Test.java	Wed Mar 24 17:16:33 2010 -0700
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2010 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6910605
+ * @summary C2: NullPointerException/ClassCastException is thrown when C2 is used with DeoptimizeALot
+ *
+ * @run main/othervm -Xmx64m -XX:+IgnoreUnrecognizedVMOptions -XX:+DeoptimizeALot -XX:+DoEscapeAnalysis -Xbatch -XX:InlineSmallCode=2000 Test
+ *
+ */
+
+/*
+ * Added InlineSmallCode=2000 to guarantee inlining of StringBuilder::append() so the StringBuilder object can be scalar-replaced.
+ *
+ * original test: gc/gctests/StringGC
+ */
+
+public class Test {
+        private final String toAdd = "0123456789abcdef";
+        private int maxLength;
+        private static final int numberOfThreads = 8;
+
+        private class StringAdder extends Thread {
+                private String s;
+
+                public void test() {
+                        s = s + toAdd;
+                }
+                public void run() {
+                        do {
+                                test();
+                        } while (s.length() < maxLength);
+                }
+        }
+
+        public void test() throws InterruptedException {
+                maxLength = toAdd.length() * 15000 / numberOfThreads;
+                StringAdder[] sa = new StringAdder[numberOfThreads];
+                for (int i = 0; i < numberOfThreads; i++) {
+                        sa[i] = new StringAdder();
+                        sa[i].start();
+                }
+                for (int i = 0; i < numberOfThreads; i++) {
+                        sa[i].join();
+                }
+        }
+
+        public static void main(String[] args) throws InterruptedException {
+                Test t = new Test();
+                t.test();
+        }
+}
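The InlineSmallCode note above relies on how javac desugars string concatenation: each s = s + toAdd allocates a temporary StringBuilder, and escape analysis can scalar-replace that temporary only if the append() calls are inlined. A sketch of the desugared form, assuming the pre-invokedynamic concatenation scheme (class name hypothetical, illustrative only):

    public class ConcatSketch {
        public static void main(String[] args) {
            String s = "";
            for (int i = 0; i < 4; i++)
                s = concatOnce(s, "0123456789abcdef");
            System.out.println(s.length());  // 64
        }

        // Roughly what javac emits for s + toAdd: a non-escaping StringBuilder.
        static String concatOnce(String s, String toAdd) {
            return new StringBuilder().append(s).append(toAdd).toString();
        }
    }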
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6912517/Test.java	Wed Mar 24 17:16:33 2010 -0700
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2009 D.E. Shaw.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/**
+ * @test
+ * @bug 6912517
+ * @summary JIT bug compiles out (and stops running) code that needs to run, causing an NPE.
+ *
+ * @run main/othervm -Xbatch -XX:CompileThreshold=100 -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops Test
+ */
+
+/**
+ * Highlights a bug with the JIT compiler.
+ * @author Matt Bruce m b r u c e __\at/__ g m a i l DOT c o m
+ */
+public class Test implements Runnable
+{
+    private final Thread myThread;
+    private Thread       myInitialThread;
+    private boolean      myShouldCheckThreads;
+
+    /**
+     * Sets up the running thread, and starts it.
+     */
+    public Test(int id)
+    {
+        myThread = new Thread(this);
+        myThread.setName("Runner: " + id);
+        myThread.start();
+        myShouldCheckThreads = false;
+    }
+
+    /**
+     * @param shouldCheckThreads the shouldCheckThreads to set
+     */
+    public void setShouldCheckThreads(boolean shouldCheckThreads)
+    {
+        myShouldCheckThreads = shouldCheckThreads;
+    }
+
+    /**
+     * Starts up the two threads with enough delay between them for JIT to
+     * kick in.
+     * @param args
+     * @throws InterruptedException
+     */
+    public static void main(String[] args) throws InterruptedException
+    {
+        // Let this run for a bit so that run() below gets JIT-compiled.
+        for (int id = 0; id < 20; id++) {
+            System.out.println("Starting thread: " + id);
+            Test bug = new Test(id);
+            bug.setShouldCheckThreads(true);
+            Thread.sleep(2500);
+        }
+    }
+
+    /**
+     * @see java.lang.Runnable#run()
+     */
+    public void run()
+    {
+        long runNumber = 0;
+        while (true) {
+            // Run hot for a little while to give the JIT time to compile this
+            // loop, then run less hot.
+            if (runNumber > 15000) {
+                try {
+                    Thread.sleep(5);
+                }
+                catch (InterruptedException e) {
+                    e.printStackTrace();
+                }
+            }
+            runNumber++;
+            ensureProperCallingThread();
+        }
+    }
+
+    private void ensureProperCallingThread()
+    {
+        // myInitialThread should never be null here, but with the JIT bug it will be.
+        // JIT BUG IS HERE ==>>>>>
+        if (myShouldCheckThreads) {
+            if (myInitialThread == null) {
+                myInitialThread = Thread.currentThread();
+            }
+            else if (myInitialThread != Thread.currentThread()) {
+                System.out.println("Not working: " + myInitialThread.getName());
+            }
+        }
+    }
+}
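Note that myShouldCheckThreads and myInitialThread are plain fields read from a hot loop, so the test depends on the compiled code still performing those reads; portable code would declare such a cross-thread flag volatile. A sketch of the volatile version of the idiom (class name hypothetical, illustrative only):

    public class FlagSketch implements Runnable {
        private volatile boolean go;  // volatile: the loop below must re-read it

        public void run() {
            while (!go) { }           // spins until another thread sets the flag
            System.out.println("flag observed");
        }

        public static void main(String[] args) throws InterruptedException {
            FlagSketch f = new FlagSketch();
            Thread t = new Thread(f);
            t.start();
            Thread.sleep(100);
            f.go = true;
            t.join();
        }
    }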
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6916644/Test6916644.java	Wed Mar 24 17:16:33 2010 -0700
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2010 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6916644
+ * @summary C2 compiler crash on x86
+ *
+ * @run main/othervm -Xcomp -XX:CompileOnly=Test6916644.test Test6916644
+ */
+
+public class Test6916644 {
+    static int result;
+    static int i1;
+    static int i2;
+
+    static public void test(double d) {
+        result = (d <= 0.0D) ? i1 : i2;
+    }
+
+    public static void main(String[] args) {
+        for (int i = 0; i < 100000; i++) {
+            // use an alternating value so the test doesn't always go
+            // the same direction.  Otherwise we won't transform it
+            // into a cmove.
+            test(i & 1);
+        }
+    }
+}
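The alternating i & 1 argument keeps both arms of the comparison live: a condition that resolves the same way every time favors a predicted branch, while a 50/50 condition with cheap operands is the shape C2 rewrites into a conditional move. The same pattern outside the harness (class name hypothetical, illustrative only):

    public class CmoveSketch {
        public static void main(String[] args) {
            int onTrue = 1, onFalse = 2, total = 0;
            for (int i = 0; i < 100000; i++) {
                double d = i & 1;                         // alternates 0.0, 1.0, ...
                total += (d <= 0.0D) ? onTrue : onFalse;  // 50/50: cmove-friendly
            }
            System.out.println(total);                    // 150000
        }
    }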
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6921969/TestMultiplyLongHiZero.java	Wed Mar 24 17:16:33 2010 -0700
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2010 Google, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 6921969
+ * @summary Tests shorter long multiply sequences when the high 32 bits of long operands are known to be zero on x86_32
+ * @run main/othervm -Xbatch -XX:-Inline -XX:CompileOnly=.testNormal,.testLeftOptimized,.testRightOptimized,.testOptimized,.testLeftOptimized_LoadUI2L,.testRightOptimized_LoadUI2L,.testOptimized_LoadUI2L TestMultiplyLongHiZero
+ */
+
+// This test must run without any command line arguments.
+
+public class TestMultiplyLongHiZero {
+
+  private static void check(long leftFactor, long rightFactor, long optimizedProduct, long constantProduct) {
+    long normalProduct = leftFactor * rightFactor; // unaffected by the new optimization
+    if (optimizedProduct != constantProduct || normalProduct != constantProduct) {
+      throw new RuntimeException("Not all three products are equal: " +
+                                 Long.toHexString(normalProduct) + ", " +
+                                 Long.toHexString(optimizedProduct) + ", " +
+                                 Long.toHexString(constantProduct));
+    }
+  }
+
+  private static int initInt(String[] args, int v) {
+    if (args.length > 0) {
+      try {
+        return Integer.valueOf(args[0]);
+      } catch (NumberFormatException e) { }
+    }
+    return v;
+  }
+
+  private static final long mask32 = 0x00000000FFFFFFFFL;
+
+  private static void testNormal(int leftFactor, int rightFactor, long constantProduct) {
+    check((long) leftFactor,
+          (long) rightFactor,
+          (long) leftFactor * (long) rightFactor, // unaffected by the new optimization
+          constantProduct);
+  }
+
+  private static void testLeftOptimized(int leftFactor, int rightFactor, long constantProduct) {
+    check((leftFactor & mask32),
+          (long) rightFactor,
+          (leftFactor & mask32) * (long) rightFactor, // left factor optimized
+          constantProduct);
+  }
+
+  private static void testRightOptimized(int leftFactor, int rightFactor, long constantProduct) {
+    check((long) leftFactor,
+          (rightFactor & mask32),
+          (long) leftFactor * (rightFactor & mask32), // right factor optimized
+          constantProduct);
+  }
+
+  private static void testOptimized(int leftFactor, int rightFactor, long constantProduct) {
+    check((leftFactor & mask32),
+          (rightFactor & mask32),
+          (leftFactor & mask32) * (rightFactor & mask32), // both factors optimized
+          constantProduct);
+  }
+
+  private static void testLeftOptimized_LoadUI2L(int leftFactor, int rightFactor, long constantProduct, int[] factors) {
+    check((leftFactor & mask32),
+          (long) rightFactor,
+          (factors[0] & mask32) * (long) rightFactor, // left factor optimized
+          constantProduct);
+  }
+
+  private static void testRightOptimized_LoadUI2L(int leftFactor, int rightFactor, long constantProduct, int[] factors) {
+    check((long) leftFactor,
+          (rightFactor & mask32),
+          (long) leftFactor * (factors[1] & mask32), // right factor optimized
+          constantProduct);
+  }
+
+  private static void testOptimized_LoadUI2L(int leftFactor, int rightFactor, long constantProduct, int[] factors) {
+    check((leftFactor & mask32),
+          (rightFactor & mask32),
+          (factors[0] & mask32) * (factors[1] & mask32), // both factors optimized
+          constantProduct);
+  }
+
+  private static void test(int leftFactor, int rightFactor,
+                           long normalConstantProduct,
+                           long leftOptimizedConstantProduct,
+                           long rightOptimizedConstantProduct,
+                           long optimizedConstantProduct) {
+    int[] factors = new int[2];
+    factors[0] = leftFactor;
+    factors[1] = rightFactor;
+    testNormal(leftFactor, rightFactor, normalConstantProduct);
+    testLeftOptimized(leftFactor, rightFactor, leftOptimizedConstantProduct);
+    testRightOptimized(leftFactor, rightFactor, rightOptimizedConstantProduct);
+    testOptimized(leftFactor, rightFactor, optimizedConstantProduct);
+    testLeftOptimized_LoadUI2L(leftFactor, rightFactor, leftOptimizedConstantProduct, factors);
+    testRightOptimized_LoadUI2L(leftFactor, rightFactor, rightOptimizedConstantProduct, factors);
+    testOptimized_LoadUI2L(leftFactor, rightFactor, optimizedConstantProduct, factors);
+  }
+
+  public static void main(String[] args) {
+    for (int i = 0; i < 100000; ++i) { // Trigger compilation
+      int i0 = initInt(args, 1);
+      int i1 = initInt(args, 3);
+      int i2 = initInt(args, -1);
+      int i3 = initInt(args, 0x7FFFFFFF);
+      test(i0, i1, 3L, 3L, 3L, 3L);
+      test(i0, i2, -1L, -1L, 0xFFFFFFFFL, 0xFFFFFFFFL);
+      test(i0, i3, 0x7FFFFFFFL, 0x7FFFFFFFL, 0x7FFFFFFFL, 0x7FFFFFFFL);
+      test(i1, i2, -3L, -3L, 0x2FFFFFFFDL, 0x2FFFFFFFDL);
+      test(i1, i3, 0x17FFFFFFDL, 0x17FFFFFFDL, 0x17FFFFFFDL, 0x17FFFFFFDL);
+      test(i2, i3, 0xFFFFFFFF80000001L, 0x7FFFFFFE80000001L,
+           0xFFFFFFFF80000001L, 0x7FFFFFFE80000001L);
+    }
+  }
+}
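The mask32 idiom above is what establishes the "high 32 bits are zero" property: anding an int with 0xFFFFFFFFL zero-extends it into a long whose upper half is provably zero, so a 32-bit x86 back end can use one 32x32-to-64-bit multiply instead of the full 64x64 sequence. A worked sketch (class name hypothetical, illustrative only):

    public class HiZeroSketch {
        public static void main(String[] args) {
            int left = -1;                                  // 0xFFFFFFFF as an int
            long zeroExtended = left & 0x00000000FFFFFFFFL; // 4294967295, high half zero
            long product = zeroExtended * 3L;
            System.out.println(Long.toHexString(product));  // 2fffffffd, as in the test table
        }
    }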