changeset 2924:e5cb5aab85ca

Merge
author andrew
date Thu, 22 Dec 2011 15:46:11 +0000
parents b28ae681bae0 (current diff) 6259c6d3bbb7 (diff)
children 7bcec32b0c9a
files .hgtags agent/src/os/solaris/dbx/Makefile agent/src/os/solaris/dbx/README agent/src/os/solaris/dbx/README-commands.txt agent/src/os/solaris/dbx/helloWorld.cpp agent/src/os/solaris/dbx/proc_service_2.h agent/src/os/solaris/dbx/shell_imp.h agent/src/os/solaris/dbx/svc_agent_dbx.cpp agent/src/os/solaris/dbx/svc_agent_dbx.hpp agent/src/os/win32/BasicList.hpp agent/src/os/win32/Buffer.cpp agent/src/os/win32/Buffer.hpp agent/src/os/win32/Dispatcher.cpp agent/src/os/win32/Dispatcher.hpp agent/src/os/win32/Handler.hpp agent/src/os/win32/IOBuf.cpp agent/src/os/win32/IOBuf.hpp agent/src/os/win32/LockableList.hpp agent/src/os/win32/Makefile agent/src/os/win32/Message.hpp agent/src/os/win32/Monitor.cpp agent/src/os/win32/Monitor.hpp agent/src/os/win32/README-commands.txt agent/src/os/win32/README.txt agent/src/os/win32/Reaper.cpp agent/src/os/win32/Reaper.hpp agent/src/os/win32/SwDbgSrv.cpp agent/src/os/win32/SwDbgSrv.dsp agent/src/os/win32/SwDbgSrv.dsw agent/src/os/win32/SwDbgSub.cpp agent/src/os/win32/SwDbgSub.dsp agent/src/os/win32/initWinsock.cpp agent/src/os/win32/initWinsock.hpp agent/src/os/win32/ioUtils.cpp agent/src/os/win32/ioUtils.hpp agent/src/os/win32/isNT4.cpp agent/src/os/win32/isNT4.hpp agent/src/os/win32/libInfo.cpp agent/src/os/win32/libInfo.hpp agent/src/os/win32/nt4internals.cpp agent/src/os/win32/nt4internals.hpp agent/src/os/win32/ports.h agent/src/os/win32/procList.cpp agent/src/os/win32/procList.hpp agent/src/os/win32/serverLists.cpp agent/src/os/win32/serverLists.hpp agent/src/os/win32/toolHelp.cpp agent/src/os/win32/toolHelp.hpp agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/DbxAddress.java agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/DbxDebugger.java agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/DbxDebuggerLocal.java agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/DbxOopHandle.java agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/DbxThreadFactory.java agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/sparc/DbxSPARCThread.java agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/sparc/DbxSPARCThreadContext.java agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/sparc/DbxSPARCThreadFactory.java agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/x86/DbxX86Thread.java agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/x86/DbxX86ThreadContext.java agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/x86/DbxX86ThreadFactory.java agent/src/share/classes/sun/jvm/hotspot/debugger/win32/AddressDataSource.java agent/src/share/classes/sun/jvm/hotspot/debugger/win32/DLL.java agent/src/share/classes/sun/jvm/hotspot/debugger/win32/TestDebugger.java agent/src/share/classes/sun/jvm/hotspot/debugger/win32/TestHelloWorld.java agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32Address.java agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32CDebugInfoBuilder.java agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32CDebugger.java agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32Debugger.java agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32DebuggerLocal.java agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32LDTEntry.java agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32LDTEntryConstants.java agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32OopHandle.java agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32Thread.java agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32ThreadContext.java 
agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeFastAAccess0.java agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeFastIAccess0.java agent/src/share/classes/sun/jvm/hotspot/jdi/ReferenceTypeImpl.java agent/src/share/classes/sun/jvm/hotspot/jdi/VirtualMachineImpl.java agent/src/share/classes/sun/jvm/hotspot/runtime/amd64/AMD64Frame.java agent/src/share/classes/sun/jvm/hotspot/runtime/amd64/AMD64RegisterMap.java make/Makefile make/linux/Makefile make/linux/makefiles/buildtree.make make/linux/makefiles/defs.make make/linux/makefiles/gcc.make make/linux/makefiles/saproc.make make/linux/makefiles/vm.make make/solaris/makefiles/mapfile-vers-nonproduct make/solaris/makefiles/vm.make make/windows/makefiles/sa.make src/cpu/zero/vm/frame_zero.cpp src/cpu/zero/vm/methodHandles_zero.hpp src/cpu/zero/vm/sharedRuntime_zero.cpp src/cpu/zero/vm/stack_zero.cpp src/cpu/zero/vm/stubGenerator_zero.cpp src/os/linux/vm/os_linux.cpp src/share/vm/ci/ciEnv.cpp src/share/vm/classfile/systemDictionary.cpp src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp src/share/vm/gc_implementation/parallelScavenge/psPermGen.cpp src/share/vm/memory/allocation.hpp src/share/vm/memory/collectorPolicy.cpp src/share/vm/opto/cfgnode.cpp src/share/vm/opto/loopnode.hpp src/share/vm/opto/type.hpp src/share/vm/prims/jni.cpp src/share/vm/prims/jvmtiEnv.cpp src/share/vm/runtime/arguments.cpp src/share/vm/runtime/globals.hpp src/share/vm/runtime/os.cpp src/share/vm/runtime/reflectionCompat.hpp src/share/vm/runtime/vm_version.cpp src/share/vm/utilities/bitMap.hpp src/share/vm/utilities/ostream.cpp
diffstat 617 files changed, 25154 insertions(+), 25526 deletions(-)
--- a/.hgtags	Wed Sep 28 23:13:07 2011 +0100
+++ b/.hgtags	Thu Dec 22 15:46:11 2011 +0000
@@ -186,3 +186,29 @@
 81d815b05abb564aa1f4100ae13491c949b9a07e jdk7-b147
 81d815b05abb564aa1f4100ae13491c949b9a07e hs21-b17
 7693eb0fce1f6b484cce96c233ea20bdad8a09e0 icedtea-2.0-branchpoint
+9b0ca45cd756d538c4c30afab280a91868eee1a5 jdk7u2-b01
+0cc8a70952c368e06de2adab1f2649a408f5e577 jdk8-b01
+31e253c1da429124bb87570ab095d9bc89850d0a jdk8-b02
+3a2fb61165dfc72e398179a2796d740c8da5b8c0 jdk8-b03
+0fa3ace511fe98fe948e751531f3e2b7c60c8376 jdk8-b04
+dce7d24674f4d0bed00de24f00119057fdce7cfb jdk8-b05
+0cc8a70952c368e06de2adab1f2649a408f5e577 hs22-b01
+7c29742c41b44fb0cd5a13c7ac8834f3f2ca649e hs22-b02
+3a2fb61165dfc72e398179a2796d740c8da5b8c0 hs22-b03
+ce9bde819dcba4a5d2822229d9183e69c74326ca hs22-b04
+8580b4f22e294cc1392a472e3ee263bca74751ce jdk7u2-b04
+2c820a7d4f304ebb6d310187eae20c15d6ced3b0 jdk7u2-b05
+43252bd4c09d30254de3b35148e5ea05e15f2cfb jdk7u2-b06
+8bab8fb7adb060d2dfcf4bb6b19281905fc0edb3 jdk7u2-b07
+513a84dd0f8b56dc0836b4e0bdd5dd0a778fc634 hs22-b05
+650d15d8f37255d3b805aa00c5bd1c30984b203d hs22-b06
+8035e71ac3f6c8a453f7e9483e7144731388b14e jdk7u2-b08
+cd3d4ec354fd040c1f47614991b3fe6d5cc5e9da hs22-b07
+b93bc193d73bd4d07150a3e8f85a8ca4bb18157c jdk7u2-b09
+623aec2a90f721fd0de9877bf7be8624874fd557 hs22-b08
+482e282037d780ca48a0eaaa4015b8ae20f0e0a9 jdk7u2-b11
+c8abdaa56b471195aefbac6ee385d7d35b8aec74 hs22-b09
+4061b13e3e6be63b6f157ef773f374f2355fdb48 jdk7u2-b12
+3ba0bb2e7c8ddac172f5b995aae57329cdd2dafa hs22-b10
+f17fe2f4b6aacc19cbb8ee39476f2f13a1c4d3cd jdk7u2-b13
+0744602f85c6fe62255326df595785eb2b32166d jdk7u2-b21
--- a/.jcheck/conf	Wed Sep 28 23:13:07 2011 +0100
+++ b/.jcheck/conf	Thu Dec 22 15:46:11 2011 +0000
@@ -1,1 +1,2 @@
 project=jdk7
+bugids=dup
--- a/THIRD_PARTY_README	Wed Sep 28 23:13:07 2011 +0100
+++ b/THIRD_PARTY_README	Thu Dec 22 15:46:11 2011 +0000
@@ -216,15 +216,16 @@
 is included with JRE 7, JDK 7, and OpenJDK 7.
 
 You are receiving a copy of the Elliptic Curve Cryptography library in source
-form with the JDK 7 source distribution and object code in the JRE 7 & JDK 7
-runtime.
-
-The terms of the Oracle license do NOT apply to the Elliptic Curve
-Cryptography library program; it is licensed under the following license,
-separately from the Oracle programs you receive. If you do not wish to install
-this program, you may delete the library named libsunec.so (on Solaris and
-Linux systems) or sunec.dll (on Windows systems) from the JRE bin directory
-reserved for native libraries.
+form with the JDK 7 and OpenJDK7 source distributions, and as object code in
+the JRE 7 & JDK 7 runtimes.
+
+In the case of the JRE 7 & JDK 7 runtimes, the terms of the Oracle license do
+NOT apply to the Elliptic Curve Cryptography library; it is licensed under the
+following license, separately from Oracle's JDK & JRE.  If you do not wish to
+install the Elliptic Curve Cryptography library, you may delete the library
+named libsunec.so (on Solaris and Linux systems) or sunec.dll (on Windows
+systems) from the JRE bin directory reserved for native libraries.
+
 
 --- begin of LICENSE ---
 
@@ -1000,7 +1001,7 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to libpng 1.2.18, which is 
+%% This notice is provided with respect to libpng 1.5.4, which is 
 included with JRE 7, JDK 7, and OpenJDK 7.
 
 --- begin of LICENSE ---
@@ -1014,8 +1015,10 @@
 If you modify libpng you may insert additional notices immediately following
 this sentence.
 
-libpng versions 1.2.6, August 15, 2004, through 1.2.18, May 15, 2007, are
-Copyright (c) 2004, 2006-2007 Glenn Randers-Pehrson, and are
+This code is released under the libpng license.
+
+libpng versions 1.2.6, August 15, 2004, through 1.5.4, July 7, 2011, are
+Copyright (c) 2004, 2006-2011 Glenn Randers-Pehrson, and are
 distributed according to the same disclaimer and license as libpng-1.2.5
 with the following individual added to the list of Contributing Authors
 
@@ -1112,7 +1115,7 @@
 
 Glenn Randers-Pehrson
 glennrp at users.sourceforge.net
-May 15, 2007
+July 7, 2011
 
 --- end of LICENSE ---
 
--- a/agent/make/Makefile	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/make/Makefile	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -48,6 +48,7 @@
 sun.jvm.hotspot.bugspot \
 sun.jvm.hotspot.bugspot.tree \
 sun.jvm.hotspot.c1 \
+sun.jvm.hotspot.ci \
 sun.jvm.hotspot.code \
 sun.jvm.hotspot.compiler \
 sun.jvm.hotspot.debugger \
@@ -56,9 +57,6 @@
 sun.jvm.hotspot.debugger.cdbg.basic \
 sun.jvm.hotspot.debugger.cdbg.basic.amd64 \
 sun.jvm.hotspot.debugger.cdbg.basic.x86 \
-sun.jvm.hotspot.debugger.dbx \
-sun.jvm.hotspot.debugger.dbx.sparc \
-sun.jvm.hotspot.debugger.dbx.x86 \
 sun.jvm.hotspot.debugger.dummy \
 sun.jvm.hotspot.debugger.ia64 \
 sun.jvm.hotspot.debugger.linux \
@@ -76,7 +74,6 @@
 sun.jvm.hotspot.debugger.remote.sparc \
 sun.jvm.hotspot.debugger.remote.x86 \
 sun.jvm.hotspot.debugger.sparc \
-sun.jvm.hotspot.debugger.win32 \
 sun.jvm.hotspot.debugger.win32.coff \
 sun.jvm.hotspot.debugger.windbg \
 sun.jvm.hotspot.debugger.windbg.amd64 \
@@ -84,6 +81,7 @@
 sun.jvm.hotspot.debugger.windbg.x86 \
 sun.jvm.hotspot.debugger.x86 \
 sun.jvm.hotspot.gc_implementation \
+sun.jvm.hotspot.gc_implementation.g1 \
 sun.jvm.hotspot.gc_implementation.parallelScavenge \
 sun.jvm.hotspot.gc_implementation.shared \
 sun.jvm.hotspot.gc_interface \
@@ -91,7 +89,9 @@
 sun.jvm.hotspot.jdi \
 sun.jvm.hotspot.livejvm \
 sun.jvm.hotspot.memory \
+sun.jvm.hotspot.opto \
 sun.jvm.hotspot.oops \
+sun.jvm.hotspot.prims \
 sun.jvm.hotspot.runtime \
 sun.jvm.hotspot.runtime.amd64 \
 sun.jvm.hotspot.runtime.ia64 \
@@ -139,6 +139,7 @@
 sun/jvm/hotspot/bugspot/*.java \
 sun/jvm/hotspot/bugspot/tree/*.java \
 sun/jvm/hotspot/c1/*.java \
+sun/jvm/hotspot/ci/*.java \
 sun/jvm/hotspot/code/*.java \
 sun/jvm/hotspot/compiler/*.java \
 sun/jvm/hotspot/debugger/*.java \
@@ -147,9 +148,6 @@
 sun/jvm/hotspot/debugger/cdbg/basic/*.java \
 sun/jvm/hotspot/debugger/cdbg/basic/amd64/*.java \
 sun/jvm/hotspot/debugger/cdbg/basic/x86/*.java \
-sun/jvm/hotspot/debugger/dbx/*.java \
-sun/jvm/hotspot/debugger/dbx/sparc/*.java \
-sun/jvm/hotspot/debugger/dbx/x86/*.java \
 sun/jvm/hotspot/debugger/dummy/*.java \
 sun/jvm/hotspot/debugger/ia64/*.java \
 sun/jvm/hotspot/debugger/linux/*.java \
@@ -165,17 +163,21 @@
 sun/jvm/hotspot/debugger/remote/sparc/*.java \
 sun/jvm/hotspot/debugger/remote/x86/*.java \
 sun/jvm/hotspot/debugger/sparc/*.java \
-sun/jvm/hotspot/debugger/win32/*.java \
 sun/jvm/hotspot/debugger/win32/coff/*.java \
 sun/jvm/hotspot/debugger/windbg/*.java \
 sun/jvm/hotspot/debugger/windbg/ia64/*.java \
 sun/jvm/hotspot/debugger/windbg/x86/*.java \
 sun/jvm/hotspot/debugger/x86/*.java \
+sun/jvm/hotspot/gc_implementation/g1/*.java \
+sun/jvm/hotspot/gc_implementation/parallelScavenge/*.java \
+sun/jvm/hotspot/gc_implementation/shared/*.java \
 sun/jvm/hotspot/interpreter/*.java \
 sun/jvm/hotspot/jdi/*.java \
 sun/jvm/hotspot/livejvm/*.java \
 sun/jvm/hotspot/memory/*.java \
 sun/jvm/hotspot/oops/*.java \
+sun/jvm/hotspot/opto/*.java \
+sun/jvm/hotspot/prims/*.java \
 sun/jvm/hotspot/runtime/*.java \
 sun/jvm/hotspot/runtime/amd64/*.java \
 sun/jvm/hotspot/runtime/ia64/*.java \
--- a/agent/make/saenv.sh	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/make/saenv.sh	Thu Dec 22 15:46:11 2011 +0000
@@ -70,6 +70,14 @@
 
 SA_CLASSPATH=$STARTDIR/../build/classes:$STARTDIR/../src/share/lib/js.jar:$STARTDIR/sa.jar:$STARTDIR/lib/js.jar
 
+if [ ! -z "$SA_TYPEDB" ]; then
+  if [ ! -f $SA_TYPEDB ]; then
+    echo "$SA_TYPEDB is unreadable"
+    exit 1
+  fi
+  OPTIONS="-Dsun.jvm.hotspot.typedb=$SA_TYPEDB ${OPTIONS}"
+fi
+
 OPTIONS="-Djava.system.class.loader=sun.jvm.hotspot.SALauncherLoader ${OPTIONS}"
 
 SA_JAVA_CMD="$SA_PREFIX_CMD $SA_JAVA -showversion ${OPTIONS} -cp $SA_CLASSPATH $SA_OPTIONS"
--- a/agent/make/saenv64.sh	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/make/saenv64.sh	Thu Dec 22 15:46:11 2011 +0000
@@ -67,6 +67,14 @@
 
 SA_CLASSPATH=$STARTDIR/../build/classes:$STARTDIR/../src/share/lib/js.jar:$STARTDIR/sa.jar::$STARTDIR/lib/js.jar
 
+if [ ! -z "$SA_TYPEDB" ]; then
+  if [ ! -f $SA_TYPEDB ]; then
+    echo "$SA_TYPEDB is unreadable"
+    exit 1
+  fi
+  OPTIONS="-Dsun.jvm.hotspot.typedb=$SA_TYPEDB ${OPTIONS}"
+fi
+
 OPTIONS="-Djava.system.class.loader=sun.jvm.hotspot.SALauncherLoader ${OPTIONS}"
 
 SA_JAVA_CMD="$SA_PREFIX_CMD $SA_JAVA -d64 -showversion ${OPTIONS} -cp $SA_CLASSPATH $SA_OPTIONS"
--- a/agent/src/os/solaris/Makefile	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/os/solaris/Makefile	Thu Dec 22 15:46:11 2011 +0000
@@ -24,9 +24,7 @@
 
 
 all:
-	cd dbx; $(MAKE) all
 	cd proc; $(MAKE) all
 
 clean:
-	cd dbx; $(MAKE) clean
 	cd proc; $(MAKE) clean
--- a/agent/src/os/solaris/dbx/Makefile	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,91 +0,0 @@
-#
-# Copyright (c) 2000, 2003, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#  
-#
-
-
-# Targets are:
-#   32bit:  Build the 32 bit version in ./32bit
-#   64bit:  Build the 64 bit version in ./64bit
-#   helloWorld:  Build the helloWorld test program
-#   all:    Build all of the above.  This is the default.
-#
-# NOTE: This makefile uses IOBuf.cpp, IOBuf.hpp, Buffer.cpp, and
-#           Buffer.hpp from the src/os/win32/agent directory.
-
-.PHONY: 32bit 64bit
-
-ARCH_ORIG = $(shell uname -p)
-
-# C++    := /java/devtools/$(ARCH_ORIG)/SUNWspro/SC6.1/bin/CC
-
-C++    := CC
-RM     := /usr/bin/rm
-MKDIRS := /usr/bin/mkdir -p
-
-
-WIN32_DIR := ../../win32
-ARCH     := $(subst i386,i486,$(ARCH_ORIG))
-# INCLUDES := -I/net/sparcworks.eng/export/set/sparcworks5/dbx_62_intg/dev/src/dbx -I$(WIN32_DIR)
-INCLUDES := -I. -I$(WIN32_DIR)
-CFLAGS_32bit := -xarch=v8
-CFLAGS_64bit := -xarch=v9
-CFLAGS   := -PIC -xO3 $(INCLUDES)
-LIBS     := -lsocket -lnsl -lrtld_db
-LDFLAGS  := -G
-
-ifneq "$(ARCH)" "i486"
-    CFLAGS += $(CFLAGS_$(VERSION))
-    LDFLAGS += $(CFLAGS_$(VERSION))
-endif
-
-# We use IOBuf.hpp, IOBuf.cpp, Buffer.hpp, and Buffer.cpp from the win32 dir.
-vpath %.cpp .:$(WIN32_DIR)
-vpath %.hpp .:$(WIN32_DIR)
-
-OBJS = $(VERSION)/svc_agent_dbx.o $(VERSION)/IOBuf.o $(VERSION)/Buffer.o
-
-
-
-# The default is to make both 32 bit and 64 bit versions.
-all:: 32bit 64bit
-
-32bit 64bit:: 
-	$(MKDIRS) $@
-	$(MAKE) $@/libsvc_agent_dbx.so  helloWorld VERSION=$@
-
-$(VERSION)/IOBuf.o: IOBuf.hpp
-$(VERSION)/Buffer.o: Buffer.hpp
-$(VERSION)/svc_agent_dbx.o: svc_agent_dbx.hpp
-
-$(VERSION)/%.o: %.cpp
-	$(C++) $(CFLAGS) -c $< -o $@
-
-$(VERSION)/libsvc_agent_dbx.so:: $(OBJS)
-	$(C++) $(LDFLAGS) -o $(VERSION)/libsvc_agent_dbx.so $(OBJS) $(LIBS)
-
-# Would be nice to move this into a shared directory
-helloWorld:: helloWorld.cpp
-	$(C++) -g $< -o $@
-
-clean::
-	$(RM) -rf 32bit 64bit *.o helloWorld
--- a/agent/src/os/solaris/dbx/README	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,9 +0,0 @@
-shell_impl.h
-proc_service_2.h
-
-The above files are captured from the dbx build environment.
-Rather then use a -I that points to stuff in .eng domain that
-may not be accessible in other domains these files are just
-copied here so local builds in other domains will work.
-These files rarely change so the fact that we might have to
-strobe in new ones on rare occasions is no big deal.
--- a/agent/src/os/solaris/dbx/README-commands.txt	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,82 +0,0 @@
-This import module uses a largely text-based protocol, except for
-certain bulk data transfer operations. All text is in single-byte
-US-ASCII.
-
-Commands understood:
-
-address_size                  ::= <int result>
-
-    Returns 32 if attached to 32-bit process, 64 if 64-bit.
-
-peek_fail_fast <bool arg>     ::=
-
-    Indicates whether "peek" requests should "fail fast"; that is, if
-    any of the addresses in the requested range are unmapped, report
-    the entire range as unmapped. This is substantially faster than
-    the alternative, which is to read the entire range byte-by-byte.
-    However, it should only be used when it is guaranteed by the
-    client application that peeks come from at most one page. The
-    default is that peek_fast_fail is not enabled.
-
-peek <address addr> <unsigned int numBytes> ::=
-    B<binary char success>
-       [<binary unsigned int len> <binary char isMapped> [<binary char data>]...]...
-
-    NOTE that the binary portion of this message is prefixed by the
-    uppercase US-ASCII letter 'B', allowing easier synchronization by
-    clients. There is no data between the 'B' and the rest of the
-    message.
-
-    May only be called once attached. Reads the address space of the
-    target process starting at the given address (see below for format
-    specifications) and extending the given number of bytes. Whether
-    the read succeeded is indicated by a single byte containing a 1 or
-    0 (success or failure). If successful, the return result is given
-    in a sequence of ranges. _len_, the length of each range, is
-    indicated by a 32-bit unsigned integer transmitted with big-endian
-    byte ordering (i.e., most significant byte first).  _isMapped_
-    indicates whether the range is mapped or unmapped in the target
-    process's address space, and will contain the value 1 or 0 for
-    mapped or unmapped, respectively. If the range is mapped,
-    _isMapped_ is followed by _data_, containing the raw binary data
-    for the range. The sum of all ranges' lengths is guaranteed to be
-    equivalent to the number of bytes requested.
-
-poke <address addr> <int numBytes> B[<binary char data>]... ::= <bool result>
-
-    NOTE that the binary portion of this message is prefixed by the
-    uppercase US-ASCII letter 'B', allowing easier synchronization by
-    clients. There is no data between the 'B' and the rest of the
-    message.
-
-    Writes the given data to the target process starting at the given
-    address. Returns 1 on success, 0 on failure (i.e., one or more of
-    target addresses were unmapped).
-
-mapped <address addr> <int numBytes> ::= <bool result>
-
-    Returns 1 if entire address range [address...address + int arg) is
-    mapped in target process's address space, 0 if not
-
-lookup <symbol objName> <symbol sym> ::= <address addr>
-
-    First symbol is object name; second is symbol to be looked up.
-    Looks up symbol in target process's symbol table and returns
-    address. Returns NULL (0x0) if symbol is not found.
-
-thr_gregs <int tid>                  ::= <int numAddresses> <address...>
-
-    Fetch the "general" (integer) register set for the given thread.
-    Returned as a series of hexidecimal values. NOTE: the meaning of
-    the return value is architecture-dependent. In general it is the
-    contents of the prgregset_t.
-
-exit                                 ::=
-
-    Exits the serviceability agent dbx module, returning control to
-    the dbx prompt.
-
-// Data formats and example values:
-<address>      ::=   0x12345678[9ABCDEF0] /* up to 64-bit hex value */
-<unsigned int> ::=   5                    /* up to 32-bit integer number; no leading sign */
-<bool>         ::=   1                    /* ASCII '0' or '1' */
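
For illustration only (not part of the changeset): a minimal Java sketch of a client driving the line-oriented US-ASCII protocol documented in the removed README-commands.txt above. The class name and the host/port arguments are assumptions made for the sketch; the agent's actual port is the PORT constant in svc_agent_dbx.hpp, which does not appear in this diff, and the real consumer was the Java-side dbx debugger code removed elsewhere in this change.

// Hypothetical probe client for the dbx agent's text protocol (illustrative sketch).
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.net.Socket;

public class DbxAgentProbe {
    public static void main(String[] args) throws Exception {
        String host = args[0];                      // e.g. "localhost"
        int port = Integer.parseInt(args[1]);       // the agent's PORT constant (not shown in this diff)
        try (Socket s = new Socket(host, port)) {
            s.setTcpNoDelay(true);                  // mirror the agent's TCP_NODELAY setting
            PrintWriter out = new PrintWriter(
                new OutputStreamWriter(s.getOutputStream(), "US-ASCII"), true);
            BufferedReader in = new BufferedReader(
                new InputStreamReader(s.getInputStream(), "US-ASCII"));

            out.println("address_size");            // one US-ASCII command per line
            System.out.println("address_size -> " + in.readLine());  // "32" or "64"

            out.println("exit");                    // agent returns control to the dbx prompt
        }
    }
}
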
--- a/agent/src/os/solaris/dbx/helloWorld.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include <stdio.h>
-#include <inttypes.h>
-
-extern "C" {
-  const char* helloWorldString = "Hello, world!";
-  // Do not change these values without changing TestDebugger.java as well
-  // FIXME: should make these jbyte, jshort, etc...
-  volatile int8_t  testByte     = 132;
-  volatile int16_t testShort    = 27890;
-  volatile int32_t testInt      = 1020304050;
-  volatile int64_t testLong     = 102030405060708090LL;
-  volatile float   testFloat    = 35.4F;
-  volatile double  testDouble   = 1.23456789;
-
-  volatile int helloWorldTrigger = 0;
-}
-
-int
-main(int, char**) {
-  while (1) {
-    while (helloWorldTrigger == 0) {
-    }
-
-    fprintf(stderr, "%s\n", helloWorldString);
-    fprintf(stderr, "testByte=%d\n", testByte);
-    fprintf(stderr, "testShort=%d\n", testShort);
-    fprintf(stderr, "testInt=%d\n", testInt);
-    fprintf(stderr, "testLong=%d\n", testLong);
-    fprintf(stderr, "testFloat=%d\n", testFloat);
-    fprintf(stderr, "testDouble=%d\n", testDouble);
-
-    while (helloWorldTrigger != 0) {
-    }
-  }
-}
--- a/agent/src/os/solaris/dbx/proc_service_2.h	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,172 +0,0 @@
-/*
- * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef _PROC_SERVICE_2_H
-#define _PROC_SERVICE_2_H
-
-/*
- * Types, function definitions for the provider of services beyond
- * proc_service.  This interface will be used by import modules like
- * BAT/prex, NEO debugger etc.
- */
-
-/*
- CCR info
-
- Version history:
-
-        1.0       - Initial CCR release
-
-        1.1       - Changes for GLUE/neo.
-                    New entry points ps_svnt_generic() and ps_svc_generic()
-                  - New entry point ps_getpid()
-
- Release information for automatic CCR updates:
- BEGIN RELEASE NOTES: (signifies what gets put into CCR release notes)
-        1.2       - Changes to support Solaris 2.7
-
- END RELEASE NOTES: (signifies what gets put into CCR release notes)
-
- Following is used for CCR version number:
-
-#define CCR_PROC_SERVICE_2_VERSION 1.2
-
-*/
-
-
-#include <proc_service.h>
-#include <sys/types.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct ps_loadobj {
-        int     objfd;          /* fd of the load object or executable
-                                 * -1 implies its not available.
-                                 * This file decriptor is live only during the
-                                 * particular call to ps_iter_f().  If you
-                                 * need it beyond that you need to dup() it.
-                                 */
-        psaddr_t
-                text_base;      /* address where text of loadobj was mapped */
-        psaddr_t
-                data_base;      /* address where data of loadobj was mapped */
-        const char *objname;    /* loadobj name */
-};
-
-typedef int ps_iter_f(const struct ps_prochandle *, const struct ps_loadobj *,
-                        void *cd);
-
-/*
- * Returns the ps_prochandle for the current process under focus.  Returns
- * NULL if there is none.
- */
-
-const struct ps_prochandle *
-ps_get_prochandle(void);
-
-/*
- * Returns the ps_prochandle for the current process(allows core files to
- * be specified) under focus.  Returns NULL if there is none.
- */
-const struct ps_prochandle *
-ps_get_prochandle2(int cores_too);
-
-/*
- * Returns the pid of the process referred to by the ps_prochandle.
- *
- * 0 is returned in case the ps_prochandle is not valid or refers to dead
- * process.
- *
- */
-pid_t
-ps_getpid(const struct ps_prochandle *);
-
-/*
- * Iteration function that iterates over all load objects *and the
- *      executable*
- *
- *      If the callback routine returns:
- *      0 - continue processing link objects
- *      non zero - stop calling the callback function
- *
- */
-
-ps_err_e
-ps_loadobj_iter(const struct ps_prochandle *, ps_iter_f *, void *clnt_data);
-
-/*
- * Address => function name mapping
- *
- * Given an address, returns a pointer to the function's
- * linker name (null terminated).
- */
-
-ps_err_e
-ps_find_fun_name(const struct ps_prochandle *, psaddr_t addr,
-                        const char **name);
-
-/*
- * Interface to LD_PRELOAD.  LD_PRELOAD given library across the
- * program 'exec'.
- *
- */
-
-/*
- * Append/Prepend the 'lib' (has to be library name as understood by LD_PRELOAD)
- * to the LD_PRELOAD variable setting to be used by the debugee
- * Returns a cookie (in id).
- */
-ps_err_e
-ps_ld_preload_append(const char *lib, int *id);
-ps_err_e
-ps_ld_preload_prepend(const char *lib, int *id);
-
-/*
- * Remove the library associated with 'id' from the LD_PRELOAD setting.
- *
- */
-ps_err_e
-ps_ld_preload_remove(int id);
-
-#ifdef __cplusplus
-}
-#endif
-
-/*
- * The following are C++ only interfaces
- */
-#ifdef __cplusplus
-
-/*
- * classes ServiceDbx and ServantDbx and defined in "gp_dbx_svc.h" which is
- * accessed via CCR
- */
-extern class ServantDbx *ps_svnt_generic();
-extern class ServiceDbx *ps_svc_generic();
-
-#endif
-
-#endif /* _PROC_SERVICE_2_H */
--- a/agent/src/os/solaris/dbx/shell_imp.h	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,164 +0,0 @@
-/*
- * Copyright (c) 2001, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHELL_IMP_H
-#define SHELL_IMP_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stdio.h>
-
-/*
- CCR info
-
- Vesrion history:
-
-        1.0       - Initial CCR release
-
- Release information for automatic CCR updates:
-
- BEGIN RELEASE NOTES: (signifies what gets put into CCR release notes)
-        1.1
-                  - Entry points for va_list style msgs; new shell_imp_vmsg()
-                    and shell_imp_verrmsg()
-                  - shell_imp_env_checker() is now shell_imp_var_checker().
-                    Also the var_checker callback gets passed interp.
-        1.2       - interposition framework (used by jdbx)
-                  - access to input FILE pointer.
-
- END RELEASE NOTES: (signifies what gets put into CCR release notes)
-
-Following is used as a CCR version number:
-#define CCR_SHELL_IMP_VERSION 1.1
-*/
-
-#include <stdarg.h>
-
-#define SHELL_IMP_MAJOR 1
-#define SHELL_IMP_MINOR 2
-#define SHELL_IMP_FLAG_GLOB 0x1
-#define SHELL_IMP_FLAG_ARGQ 0x2
-
-typedef void *shell_imp_interp_t;
-typedef void *shell_imp_command_t;
-typedef int shell_imp_fun_t(shell_imp_interp_t, int, char **, void *);
-
-int
-shell_imp_init(
-    int,                /* major version number */
-    int,                /* minor version number */
-    shell_imp_interp_t, /* interpreter */
-    int,                /* argc */
-    char *[]            /* argv */
-);
-
-int
-shell_imp_fini(shell_imp_interp_t);
-
-shell_imp_command_t
-shell_imp_define_command(char *,        /* command name e.g. "tnf" */
-                    shell_imp_fun_t *,  /* callback function */
-                    int,                /* SHELL_IMP_FLAG_* bit vector */
-                    void *,             /* client_data Passed as last arg to
-                                        /* callback function */
-                    char *              /* help message, e.g. */
-                                        /* "enable the specified tnf probes" */
-            );
-
-int
-shell_imp_undefine_command(shell_imp_command_t);
-
-int
-shell_imp_var_checker(shell_imp_interp_t,
-                      const char *,         /* var name */
-                      int (*)(shell_imp_interp_t, const char*) /* env checker */
-                     );
-
-int
-shell_imp_execute(shell_imp_interp_t, const char *);
-
-const char *
-shell_imp_get_var(shell_imp_interp_t, const char *);
-
-void
-shell_imp_msg(shell_imp_interp_t, const char *, ...);
-
-void
-shell_imp_errmsg(shell_imp_interp_t, const char *, ...);
-
-void
-shell_imp_vmsg(shell_imp_interp_t, const char *, va_list);
-
-void
-shell_imp_verrmsg(shell_imp_interp_t, const char *, va_list);
-
-
-
-/*
- * Stuff added for 1.2
- */
-
-struct shell_imp_interposition_info_t {
-    shell_imp_fun_t *
-                new_func;
-    void *      new_client_data;
-    shell_imp_fun_t *
-                original_func;
-    void *      original_client_data;
-    int         original_flags;
-};
-
-typedef int shell_imp_dispatcher_t(shell_imp_interp_t, int, char **,
-                                   shell_imp_interposition_info_t *);
-
-shell_imp_command_t
-shell_imp_interpose(char *name,
-                    shell_imp_fun_t *new_func,
-                    int    flags,
-                    void *client_data,
-                    char * description,
-                    shell_imp_dispatcher_t *);
-
-int shell_imp_uninterpose(shell_imp_command_t);
-
-int
-shell_imp_dispatch_interposition(shell_imp_interp_t,
-                                 shell_imp_interposition_info_t *,
-                                 int argc, char *argv[]);
-
-int
-shell_imp_dispatch_original(shell_imp_interp_t,
-                                 shell_imp_interposition_info_t *,
-                                 int argc, char *argv[]);
-
-FILE *
-shell_imp_cur_input(shell_imp_interp_t);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
--- a/agent/src/os/solaris/dbx/svc_agent_dbx.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1068 +0,0 @@
-/*
- * Copyright (c) 2000, 2002, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-// This is the implementation of a very simple dbx import module which
-// handles requests from the VM which come in over a socket. The
-// higher-level Java wrapper for dbx starts the debugger, attaches to
-// the process, imports this command, and runs it. After that, the SA
-// writes commands to this agent via its own private communications
-// channel. The intent is to move away from the text-based front-end
-// completely in the near future (no more calling "debug" by printing
-// text to dbx's stdin).
-
-#include <stdio.h>
-#include <errno.h>
-#include <ctype.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <unistd.h>
-#include <string.h>
-#include <stropts.h>
-#include <netinet/in.h>
-#include <netinet/tcp.h>
-
-#include <proc_service.h>
-#include <sys/procfs_isa.h>
-#include <rtld_db.h>
-#include "proc_service_2.h"
-#include "svc_agent_dbx.hpp"
-
-static ServiceabilityAgentDbxModule* module = NULL;
-#define NEEDS_CLEANUP
-
-// Useful for debugging
-#define VERBOSE_DEBUGGING
-
-#ifdef VERBOSE_DEBUGGING
-# define debug_only(x) x
-#else
-# define debug_only(x)
-#endif
-
-// For profiling
-//#define PROFILING
-
-#ifdef PROFILING
-#define PROFILE_COUNT 200
-static Timer scanTimer;
-static Timer workTimer;
-static Timer writeTimer;
-static int numRequests = 0;
-#endif /* PROFILING */
-
-const char* ServiceabilityAgentDbxModule::CMD_ADDRESS_SIZE   = "address_size";
-const char* ServiceabilityAgentDbxModule::CMD_PEEK_FAIL_FAST = "peek_fail_fast";
-const char* ServiceabilityAgentDbxModule::CMD_PEEK           = "peek";
-const char* ServiceabilityAgentDbxModule::CMD_POKE           = "poke";
-const char* ServiceabilityAgentDbxModule::CMD_MAPPED         = "mapped";
-const char* ServiceabilityAgentDbxModule::CMD_LOOKUP         = "lookup";
-const char* ServiceabilityAgentDbxModule::CMD_THR_GREGS      = "thr_gregs";
-const char* ServiceabilityAgentDbxModule::CMD_EXIT           = "exit";
-
-// The initialization routines must not have C++ name mangling
-extern "C" {
-
-/** This is the initialization routine called by dbx upon importing of
-    this module. Returns 0 upon successful initialization, -1 upon
-    failure. */
-int shell_imp_init(int major, int minor,
-                   shell_imp_interp_t interp, int argc, char *argv[])
-{
-  // Ensure shell interpreter data structure is laid out the way we
-  // expect
-  if (major != SHELL_IMP_MAJOR) {
-    debug_only(fprintf(stderr, "Serviceability agent: unexpected value for SHELL_IMP_MAJOR (got %d, expected %d)\n", major, SHELL_IMP_MAJOR);)
-    return -1;
-  }
-  if (minor < SHELL_IMP_MINOR) {
-    debug_only(fprintf(stderr, "Serviceability agent: unexpected value for SHELL_IMP_MINOR (got %d, expected >= %d)\n", minor, SHELL_IMP_MINOR);)
-    return -1;
-  }
-
-  if (module != NULL) {
-    debug_only(fprintf(stderr, "Serviceability agent: module appears to already be initialized (should not happen)\n");)
-    // Already initialized. Should not happen.
-    return -1;
-  }
-
-  module = new ServiceabilityAgentDbxModule(major, minor, interp, argc, argv);
-  if (!module->install()) {
-    debug_only(fprintf(stderr, "Serviceability agent: error installing import module\n");)
-    delete module;
-    module = NULL;
-    return -1;
-  }
-
-  // Installation was successful. Next step will be for the user to
-  // enter the appropriate command on the command line, which will
-  // make the SA's dbx module wait for commands to come in over the
-  // socket.
-  return 0;
-}
-
-/** This is the routine called by dbx upon unloading of this module.
-    Returns 0 upon success, -1 upon failure. */
-int
-shell_imp_fini(shell_imp_interp_t)
-{
-  if (module == NULL) {
-    return -1;
-  }
-
-  bool res = module->uninstall();
-  delete module;
-  module = NULL;
-  if (!res) {
-    return -1;
-  }
-  return 0;
-}
-
-} // extern "C"
-
-/** This is the routine which is called by the dbx shell when the user
-    requests the serviceability agent module to run. This delegates to
-    ServiceabilityAgentDbxModule::run. This routine's signature must
-    match that of shell_imp_fun_t. */
-extern "C" {
-static int
-svc_agent_run(shell_imp_interp_t, int, char **, void *) {
-  if (module == NULL) {
-    return -1;
-  }
-
-  module->run();
-  return 0;
-}
-}
-
-/*
- * Implementation of ServiceabilityAgentDbxModule class
- */
-
-// NOTE: we need to forward declare the special "ps_get_prochandle2"
-// function which allows examination of core files as well. It isn't
-// currently in proc_service_2.h. Note also that it has name mangling
-// because it isn't declared extern "C".
-//const struct ps_prochandle *ps_get_prochandle2(int cores_too);
-
-ServiceabilityAgentDbxModule::ServiceabilityAgentDbxModule(int, int, shell_imp_interp_t interp,
-                                                           int argc, char *argv[])
-  :myComm(32768, 131072)
-{
-  _interp = interp;
-  _argc = argc;
-  _argv = argv;
-  _tdb_agent = NULL;
-  peek_fail_fast = false;
-  libThreadName = NULL;
-}
-
-ServiceabilityAgentDbxModule::~ServiceabilityAgentDbxModule() {
-  if (_command != NULL) {
-    uninstall();
-  }
-}
-
-char*
-readCStringFromProcess(psaddr_t addr) {
-  char c;
-  int num = 0;
-  ps_prochandle* cur_proc = (ps_prochandle*) ps_get_prochandle2(1);
-
-  // Search for null terminator
-  do {
-    if (ps_pread(cur_proc, addr + num, &c, 1) != PS_OK) {
-      return NULL;
-    }
-    ++num;
-  } while (c != 0);
-
-  // Allocate string
-  char* res = new char[num];
-  if (ps_pread(cur_proc, addr, res, num) != PS_OK) {
-    delete[] res;
-    return NULL;
-  }
-  return res;
-}
-
-int
-findLibThreadCB(const rd_loadobj_t* lo, void* data) {
-  ServiceabilityAgentDbxModule* module = (ServiceabilityAgentDbxModule*) data;
-  char* name = readCStringFromProcess(lo->rl_nameaddr);
-  if (strstr(name, "libthread.so") != NULL) {
-    module->libThreadName = name;
-    return 0;
-  } else {
-    delete[] name;
-    return 1;
-  }
-}
-
-bool
-ServiceabilityAgentDbxModule::install() {
-  // NOTE interdependency between here and Java side wrapper
-  // FIXME: casts of string literal to char * to match prototype
-  _command = shell_imp_define_command((char *) "svc_agent_run",
-                                      &svc_agent_run,
-                                      0,
-                                      NULL,
-                                      (char *) "Run the serviceability agent's dbx module.\n"
-                                      "This routine causes the module to listen on a socket for requests.\n"
-                                      "It does not return until the Java-side code tells it to exit, at\n"
-                                      "which point control is returned to the dbx shell.");
-  if (_command == NULL) {
-    debug_only(fprintf(stderr, "Serviceability agent: Failed to install svc_agent_run command\n"));
-    return false;
-  }
-
-  // This is fairly painful. Since dbx doesn't currently load
-  // libthread_db with RTLD_GLOBAL, we can't just use RTLD_DEFAULT for
-  // the argument to dlsym. Instead, we have to use rtld_db to search
-  // through the loaded objects in the target process for libthread.so and
-
-  // Try rtld_db
-  if (rd_init(RD_VERSION) != RD_OK) {
-    debug_only(fprintf(stderr, "Serviceability agent: Unable to init rtld_db\n"));
-    return false;
-  }
-
-  rd_agent_t* rda = rd_new((struct ps_prochandle*) ps_get_prochandle2(1));
-  if (rda == NULL) {
-    debug_only(fprintf(stderr, "Serviceability agent: Unable to allocate rtld_db agent\n"));
-    return false;
-  }
-
-  if (rd_loadobj_iter(rda, (rl_iter_f*) findLibThreadCB, this) != RD_OK) {
-    debug_only(fprintf(stderr, "Serviceability agent: Loadobject iteration failed\n"));
-    return false;
-  }
-
-  if (libThreadName == NULL) {
-    debug_only(fprintf(stderr, "Serviceability agent: Failed to find pathname to libthread.so in target process\n"));
-    return false;
-  }
-
-  // Find and open libthread_db.so
-  char* slash = strrchr(libThreadName, '/');
-  if (slash == NULL) {
-    debug_only(fprintf(stderr, "Serviceability agent: can't parse path to libthread.so \"%s\"\n"));
-    return false;
-  }
-
-  int slashPos = slash - libThreadName;
-  char* buf = new char[slashPos + strlen("libthread_db.so") + 20]; // slop
-  if (buf == NULL) {
-    debug_only(fprintf(stderr, "Serviceability agent: error allocating libthread_db.so pathname\n"));
-    return false;
-  }
-  strncpy(buf, libThreadName, slashPos + 1);
-
-  // Check dbx's data model; use sparcv9/ subdirectory if 64-bit and
-  // if target process is 32-bit
-  if ((sizeof(void*) == 8) &&
-      (strstr(libThreadName, "sparcv9") == NULL)) {
-    strcpy(buf + slashPos + 1, "sparcv9/");
-    slashPos += strlen("sparcv9/");
-  }
-
-  strcpy(buf + slashPos + 1, "libthread_db.so");
-
-  libThreadDB = dlopen(buf, RTLD_LAZY);
-  void* tmpDB = libThreadDB;
-  if (libThreadDB == NULL) {
-    debug_only(fprintf(stderr, "Serviceability agent: Warning: unable to find libthread_db.so at \"%s\"\n", buf));
-    // Would like to handle this case as well. Maybe dbx has a better
-    // idea of where libthread_db.so lies. If the problem with dbx
-    // loading libthread_db without RTLD_GLOBAL specified ever gets
-    // fixed, we could run this code all the time.
-    tmpDB = RTLD_DEFAULT;
-  }
-
-  delete[] buf;
-
-  // Initialize access to libthread_db
-  td_init_fn          = (td_init_fn_t*)          dlsym(tmpDB, "td_init");
-  td_ta_new_fn        = (td_ta_new_fn_t*)        dlsym(tmpDB, "td_ta_new");
-  td_ta_delete_fn     = (td_ta_delete_fn_t*)     dlsym(tmpDB, "td_ta_delete");
-  td_ta_map_id2thr_fn = (td_ta_map_id2thr_fn_t*) dlsym(tmpDB, "td_ta_map_id2thr");
-  td_thr_getgregs_fn  = (td_thr_getgregs_fn_t*)  dlsym(tmpDB, "td_thr_getgregs");
-
-  if (td_init_fn == NULL ||
-      td_ta_new_fn == NULL ||
-      td_ta_delete_fn == NULL ||
-      td_ta_map_id2thr_fn == NULL ||
-      td_thr_getgregs_fn == NULL) {
-    debug_only(fprintf(stderr, "Serviceability agent: Failed to find one or more libthread_db symbols:\n"));
-    debug_only(if (td_init_fn == NULL)          fprintf(stderr, "  td_init\n"));
-    debug_only(if (td_ta_new_fn == NULL)        fprintf(stderr, "  td_ta_new\n"));
-    debug_only(if (td_ta_delete_fn == NULL)     fprintf(stderr, "  td_ta_delete\n"));
-    debug_only(if (td_ta_map_id2thr_fn == NULL) fprintf(stderr, "  td_ta_map_id2thr\n"));
-    debug_only(if (td_thr_getgregs_fn == NULL)  fprintf(stderr, "  td_thr_getgregs\n"));
-    return false;
-  }
-
-  if ((*td_init_fn)() != TD_OK) {
-    debug_only(fprintf(stderr, "Serviceability agent: Failed to initialize libthread_db\n"));
-    return false;
-  }
-
-  return true;
-}
-
-bool
-ServiceabilityAgentDbxModule::uninstall() {
-  if (_command == NULL) {
-    return false;
-  }
-
-  if (libThreadDB != NULL) {
-    dlclose(libThreadDB);
-    libThreadDB = NULL;
-  }
-
-  int res = shell_imp_undefine_command(_command);
-
-  if (res != 0) {
-    return false;
-  }
-
-  return true;
-}
-
-bool
-ServiceabilityAgentDbxModule::run() {
-  // This is where most of the work gets done.
-  // The command processor loop looks like the following:
-  //  - create listening socket
-  //  - accept a connection (only one for now)
-  //  - while that connection is open and the "exit" command has not
-  //    been received:
-  //    - read command
-  //    - if it's the exit command, cleanup and return
-  //    - otherwise, process command and write result
-
-  int listening_socket = socket(AF_INET, SOCK_STREAM, 0);
-  if (listening_socket < 0) {
-    return false;
-  }
-
-  // Set the SO_REUSEADDR property on the listening socket. This
-  // prevents problems with calls to bind() to the same port failing
-  // after this process exits. This seems to work on all platforms.
-  int reuse_address = 1;
-  if (setsockopt(listening_socket, SOL_SOCKET, SO_REUSEADDR,
-                 (char *)&reuse_address, sizeof(reuse_address)) < 0) {
-    close(listening_socket);
-    return false;
-  }
-
-  sockaddr_in server_address;
-  // Build the server address. We can bind the listening socket to the
-  // INADDR_ANY internet address.
-  memset((char*)&server_address, 0, sizeof(server_address));
-  server_address.sin_family = AF_INET;
-  server_address.sin_addr.s_addr = (unsigned long)htonl(INADDR_ANY);
-  server_address.sin_port = htons((short)PORT);
-
-  // Bind socket to port
-  if (bind(listening_socket, (sockaddr*) &server_address,
-           sizeof(server_address)) < 0) {
-    close(listening_socket);
-    return false;
-  }
-
-  // Arbitrarily chosen backlog of 5 (shouldn't matter since we expect
-  // at most one connection)
-  if (listen(listening_socket, 5) < 0) {
-    close(listening_socket);
-    return false;
-  }
-
-  // OK, now ready to wait for a data connection. This call to
-  // accept() will block.
-  struct sockaddr_in client_address;
-  int address_len   = sizeof(client_address);
-  int client_socket = accept(listening_socket, (sockaddr*) &client_address,
-                         &address_len);
-  // Close listening socket regardless of whether accept() succeeded.
-  // (FIXME: this may be annoying, especially during debugging, but I
-  // really feel that robustness and multiple connections should be
-  // handled higher up, e.g., at the Java level -- multiple clients
-  // could conceivably connect to the SA via RMI, and that would be a
-  // more robust solution than implementing multiple connections at
-  // this level)
-  NEEDS_CLEANUP;
-
-  // NOTE: the call to shutdown() usually fails, so don't panic if this happens
-  shutdown(listening_socket, 2);
-
-  if (close(listening_socket) < 0) {
-    debug_only(fprintf(stderr, "Serviceability agent: Error closing listening socket\n"));
-    return false;
-  }
-
-  if (client_socket < 0) {
-    debug_only(fprintf(stderr, "Serviceability agent: Failed to open client socket\n"));
-    // No more cleanup necessary
-    return false;
-  }
-
-  // Attempt to disable TCP buffering on this socket. We send small
-  // amounts of data back and forth and don't want buffering.
-  int buffer_val = 1;
-  if (setsockopt(client_socket, IPPROTO_IP, TCP_NODELAY, (char *) &buffer_val, sizeof(buffer_val)) < 0) {
-    debug_only(fprintf(stderr, "Serviceability agent: Failed to set TCP_NODELAY option on client socket\n"));
-    cleanup(client_socket);
-    return false;
-  }
-
-  // OK, we have the data socket through which we will communicate
-  // with the Java side. Wait for commands or until reading or writing
-  // caused an error.
-
-  bool should_continue = true;
-
-  myComm.setSocket(client_socket);
-
-#ifdef PROFILING
-  scanTimer.reset();
-  workTimer.reset();
-  writeTimer.reset();
-#endif
-
-  // Allocate a new thread agent for libthread_db
-  if ((*td_ta_new_fn)((ps_prochandle*) ps_get_prochandle2(1), &_tdb_agent) !=
-      TD_OK) {
-    debug_only(fprintf(stderr, "Serviceability agent: Failed to allocate thread agent\n"));
-    cleanup(client_socket);
-    return false;
-  }
-
-  do {
-    // Decided to use text to communicate between these processes.
-    // Probably will make debugging easier -- could telnet in if
-    // necessary. Will make scanning harder, but probably doesn't
-    // matter.
-
-    // Why not just do what workshop does and parse dbx's console?
-    // Probably could do that, but at least this way we are in control
-    // of the text format on both ends.
-
-    // FIXME: should have some way of synchronizing these commands
-    // between the C and Java sources.
-
-    NEEDS_CLEANUP;
-
-    // Do a blocking read of a line from the socket.
-    char *input_buffer = myComm.readLine();
-    if (input_buffer == NULL) {
-      debug_only(fprintf(stderr, "Serviceability agent: error during read: errno = %d\n", errno));
-      debug_only(perror("Serviceability agent"));
-      // Error occurred during read.
-      // FIXME: should guard against SIGPIPE
-      cleanup(client_socket);
-      return false;
-    }
-
-    // OK, now ready to scan. See README-commands.txt for syntax
-    // descriptions.
-
-    bool res = false;
-    if (!strncmp(input_buffer, CMD_ADDRESS_SIZE, strlen(CMD_ADDRESS_SIZE))) {
-      res = handleAddressSize(input_buffer + strlen(CMD_ADDRESS_SIZE));
-    } else if (!strncmp(input_buffer, CMD_PEEK_FAIL_FAST, strlen(CMD_PEEK_FAIL_FAST))) {
-      res = handlePeekFailFast(input_buffer + strlen(CMD_PEEK_FAIL_FAST));
-    } else if (!strncmp(input_buffer, CMD_PEEK, strlen(CMD_PEEK))) {
-      res = handlePeek(input_buffer + strlen(CMD_PEEK));
-    } else if (!strncmp(input_buffer, CMD_POKE, strlen(CMD_POKE))) {
-      res = handlePoke(input_buffer + strlen(CMD_POKE));
-    } else if (!strncmp(input_buffer, CMD_MAPPED, strlen(CMD_MAPPED))) {
-      res = handleMapped(input_buffer + strlen(CMD_MAPPED));
-    } else if (!strncmp(input_buffer, CMD_LOOKUP, strlen(CMD_LOOKUP))) {
-      res = handleLookup(input_buffer + strlen(CMD_LOOKUP));
-    } else if (!strncmp(input_buffer, CMD_THR_GREGS, strlen(CMD_THR_GREGS))) {
-      res = handleThrGRegs(input_buffer + strlen(CMD_THR_GREGS));
-    } else if (!strncmp(input_buffer, CMD_EXIT, strlen(CMD_EXIT))) {
-      should_continue = false;
-    }
-
-    if (should_continue) {
-      if (!res) {
-        cleanup(client_socket);
-        return false;
-      }
-    }
-
-#ifdef PROFILING
-    if (++numRequests == PROFILE_COUNT) {
-      fprintf(stderr, "%d requests: %d ms scanning, %d ms work, %d ms writing\n",
-              PROFILE_COUNT, scanTimer.total(), workTimer.total(), writeTimer.total());
-      fflush(stderr);
-      scanTimer.reset();
-      workTimer.reset();
-      writeTimer.reset();
-      numRequests = 0;
-    }
-#endif
-
-  } while (should_continue);
-
-  // Successful exit
-  cleanup(client_socket);
-  return true;
-}
-
-void
-ServiceabilityAgentDbxModule::cleanup(int client_socket) {
-  shutdown(client_socket, 2);
-  close(client_socket);
-  if (_tdb_agent != NULL) {
-    (*td_ta_delete_fn)(_tdb_agent);
-  }
-}
-
-bool
-ServiceabilityAgentDbxModule::handleAddressSize(char* data) {
-  int data_model;
-  ps_err_e result = ps_pdmodel((ps_prochandle*) ps_get_prochandle2(1),
-                               &data_model);
-  if (result != PS_OK) {
-    myComm.writeString("0");
-    myComm.flush();
-    return false;
-  }
-
-  int val;
-  switch (data_model) {
-  case PR_MODEL_ILP32:
-    val = 32;
-    break;
-  case PR_MODEL_LP64:
-    val = 64;
-    break;
-  default:
-    val = 0;
-    break;
-  }
-
-  if (!myComm.writeInt(val)) {
-    return false;
-  }
-  if (!myComm.writeEOL()) {
-    return false;
-  }
-  return myComm.flush();
-}
-
-bool
-ServiceabilityAgentDbxModule::handlePeekFailFast(char* data) {
-  unsigned int val;
-  if (!scanUnsignedInt(&data, &val)) {
-    return false;
-  }
-  peek_fail_fast = (val ? true : false);
-  return true;
-}
-
-bool
-ServiceabilityAgentDbxModule::handlePeek(char* data) {
-  // Scan hex address, return false if failed
-  psaddr_t addr;
-#ifdef PROFILING
-  scanTimer.start();
-#endif /* PROFILING */
-  if (!scanAddress(&data, &addr)) {
-    return false;
-  }
-  unsigned int num;
-  if (!scanUnsignedInt(&data, &num)) {
-    return false;
-  }
-  if (num == 0) {
-#ifdef PROFILING
-    writeTimer.start();
-#endif /* PROFILING */
-    myComm.writeBinChar('B');
-    myComm.writeBinChar(1);
-    myComm.writeBinUnsignedInt(0);
-    myComm.writeBinChar(0);
-#ifdef PROFILING
-    writeTimer.stop();
-#endif /* PROFILING */
-    return true;
-  }
-#ifdef PROFILING
-  scanTimer.stop();
-  workTimer.start();
-#endif /* PROFILING */
-  char* buf = new char[num];
-  ps_prochandle* cur_proc = (ps_prochandle*) ps_get_prochandle2(1);
-  ps_err_e result = ps_pread(cur_proc, addr, buf, num);
-  if (result == PS_OK) {
-    // Fast case; entire read succeeded.
-#ifdef PROFILING
-    workTimer.stop();
-    writeTimer.start();
-#endif /* PROFILING */
-    myComm.writeBinChar('B');
-    myComm.writeBinChar(1);
-    myComm.writeBinUnsignedInt(num);
-    myComm.writeBinChar(1);
-    myComm.writeBinBuf(buf, num);
-#ifdef PROFILING
-    writeTimer.stop();
-#endif /* PROFILING */
-  } else {
-#ifdef PROFILING
-    workTimer.stop();
-#endif /* PROFILING */
-
-    if (peek_fail_fast) {
-#ifdef PROFILING
-    writeTimer.start();
-#endif /* PROFILING */
-      // Fail fast
-      myComm.writeBinChar('B');
-      myComm.writeBinChar(1);
-      myComm.writeBinUnsignedInt(num);
-      myComm.writeBinChar(0);
-#ifdef PROFILING
-    writeTimer.stop();
-#endif /* PROFILING */
-    } else {
-      // Slow case: try to read one byte at a time
-      // FIXME: need better way of handling this, a la VirtualQuery
-
-      unsigned int  strideLen      = 0;
-      int           bufIdx         = 0;
-      bool          lastByteMapped = (ps_pread(cur_proc, addr, buf, 1) == PS_OK ? true : false);
-
-#ifdef PROFILING
-      writeTimer.start();
-#endif /* PROFILING */
-      myComm.writeBinChar('B');
-      myComm.writeBinChar(1);
-#ifdef PROFILING
-      writeTimer.stop();
-#endif /* PROFILING */
-
-      for (int i = 0; i < num; ++i, ++addr) {
-#ifdef PROFILING
-        workTimer.start();
-#endif /* PROFILING */
-        result = ps_pread(cur_proc, addr, &buf[bufIdx], 1);
-#ifdef PROFILING
-        workTimer.stop();
-#endif /* PROFILING */
-        bool tmpMapped = (result == PS_OK ? true : false);
-#ifdef PROFILING
-        writeTimer.start();
-#endif /* PROFILING */
-        if (tmpMapped != lastByteMapped) {
-          // State change. Write the length of the last stride.
-          myComm.writeBinUnsignedInt(strideLen);
-          if (lastByteMapped) {
-            // Stop gathering data. Write the data of the last stride.
-            myComm.writeBinChar(1);
-            myComm.writeBinBuf(buf, strideLen);
-            bufIdx = 0;
-          } else {
-            // Start gathering data to write.
-            myComm.writeBinChar(0);
-          }
-          strideLen = 0;
-          lastByteMapped = tmpMapped;
-        }
-#ifdef PROFILING
-        writeTimer.stop();
-#endif /* PROFILING */
-        if (lastByteMapped) {
-          ++bufIdx;
-        }
-        ++strideLen;
-      }
-
-      // Write last stride (must be at least one byte long by definition)
-#ifdef PROFILING
-      writeTimer.start();
-#endif /* PROFILING */
-      myComm.writeBinUnsignedInt(strideLen);
-      if (lastByteMapped) {
-        myComm.writeBinChar(1);
-        myComm.writeBinBuf(buf, strideLen);
-      } else {
-        myComm.writeBinChar(0);
-      }
-#ifdef PROFILING
-      writeTimer.stop();
-#endif /* PROFILING */
-    }
-  }
-  delete[] buf;
-  myComm.flush();
-  return true;
-}
-
-bool
-ServiceabilityAgentDbxModule::handlePoke(char* data) {
-  // FIXME: not yet implemented
-  NEEDS_CLEANUP;
-  bool res = myComm.writeBoolAsInt(false);
-  myComm.flush();
-  return res;
-}
-
-bool
-ServiceabilityAgentDbxModule::handleMapped(char* data) {
-  // Scan address
-  psaddr_t addr;
-  if (!scanAddress(&data, &addr)) {
-    return false;
-  }
-  unsigned int num;
-  if (!scanUnsignedInt(&data, &num)) {
-    return false;
-  }
-  unsigned char val;
-  ps_prochandle* cur_proc = (ps_prochandle*) ps_get_prochandle2(1);
-  char* buf = new char[num];
-  if (ps_pread(cur_proc, addr, buf, num) == PS_OK) {
-    myComm.writeBoolAsInt(true);
-  } else {
-    myComm.writeBoolAsInt(false);
-  }
-  delete[] buf;
-  myComm.writeEOL();
-  myComm.flush();
-  return true;
-}
-
-extern "C"
-int loadobj_iterator(const rd_loadobj_t* loadobj, void *) {
-  if (loadobj != NULL) {
-    fprintf(stderr, "loadobj_iterator: visited loadobj \"%p\"\n", (void*) loadobj->rl_nameaddr);
-    return 1;
-  }
-
-  fprintf(stderr, "loadobj_iterator: NULL loadobj\n");
-  return 0;
-}
-
-bool
-ServiceabilityAgentDbxModule::handleLookup(char* data) {
-  // Debugging: iterate over loadobjs
-  /*
-  rd_agent_t* rld_agent = rd_new((ps_prochandle*) ps_get_prochandle2(1));
-  rd_loadobj_iter(rld_agent, &loadobj_iterator, NULL);
-  rd_delete(rld_agent);
-  */
-
-#ifdef PROFILING
-  scanTimer.start();
-#endif /* PROFILING */
-
-  char* object_name = scanSymbol(&data);
-  if (object_name == NULL) {
-    return false;
-  }
-  char* symbol_name = scanSymbol(&data);
-  if (symbol_name == NULL) {
-    delete[] object_name;
-    return false;
-  }
-
-#ifdef PROFILING
-  scanTimer.stop();
-  workTimer.start();
-#endif /* PROFILING */
-
-  ps_sym_t sym;
-  // FIXME: check return values from write routines
-  ps_prochandle* process = (ps_prochandle*) ps_get_prochandle2(1);
-  ps_err_e lookup_res = ps_pglobal_sym(process,
-                                       object_name, symbol_name, &sym);
-#ifdef PROFILING
-  workTimer.stop();
-  writeTimer.start();
-#endif /* PROFILING */
-
-  delete[] object_name;
-  delete[] symbol_name;
-  if (lookup_res != PS_OK) {
-    // This is too noisy
-    //    debug_only(fprintf(stderr, "ServiceabilityAgentDbxModule::handleLookup: error %d\n", lookup_res));
-    myComm.writeString("0x0");
-  } else {
-    myComm.writeAddress((void *)sym.st_value);
-  }
-  myComm.writeEOL();
-  myComm.flush();
-
-#ifdef PROFILING
-  writeTimer.stop();
-#endif /* PROFILING */
-
-  return true;
-}
-
-bool
-ServiceabilityAgentDbxModule::handleThrGRegs(char* data) {
-#ifdef PROFILING
-  scanTimer.start();
-#endif /* PROFILING */
-
-  unsigned int num;
-  // Get the thread ID
-  if (!scanUnsignedInt(&data, &num)) {
-    return false;
-  }
-
-#ifdef PROFILING
-  scanTimer.stop();
-  workTimer.start();
-#endif /* PROFILING */
-
-  // Map tid to thread handle
-  td_thrhandle_t thread_handle;
-  if ((*td_ta_map_id2thr_fn)(_tdb_agent, num, &thread_handle) != TD_OK) {
-    //    fprintf(stderr, "Error mapping thread ID %d to thread handle\n", num);
-    return false;
-  }
-
-  // Fetch register set
-  prgregset_t reg_set;
-  memset(reg_set, 0, sizeof(reg_set));
-  td_err_e result = (*td_thr_getgregs_fn)(&thread_handle, reg_set);
-  if ((result != TD_OK) && (result != TD_PARTIALREG)) {
-    //    fprintf(stderr, "Error fetching registers for thread handle %d: error = %d\n", num, result);
-    return false;
-  }
-
-#ifdef PROFILING
-  workTimer.stop();
-  writeTimer.start();
-#endif /* PROFILING */
-
-#if (defined(__sparc) || defined(__i386))
-  myComm.writeInt(NPRGREG);
-  myComm.writeSpace();
-  for (int i = 0; i < NPRGREG; i++) {
-    myComm.writeAddress((void *)reg_set[i]);
-    if (i == NPRGREG - 1) {
-      myComm.writeEOL();
-    } else {
-      myComm.writeSpace();
-    }
-  }
-#else
-#error  Please port ServiceabilityAgentDbxModule::handleThrGRegs to your current platform
-#endif
-
-  myComm.flush();
-
-#ifdef PROFILING
-  writeTimer.stop();
-#endif /* PROFILING */
-
-  return true;
-}
-
-//
-// Input routines
-//
-
-bool
-ServiceabilityAgentDbxModule::scanAddress(char** data, psaddr_t* addr) {
-  *addr = 0;
-
-  // Skip whitespace
-  while ((**data != 0) && (isspace(**data))) {
-    ++*data;
-  }
-
-  if (**data == 0) {
-    return false;
-  }
-
-  if (strncmp(*data, "0x", 2) != 0) {
-    return false;
-  }
-
-  *data += 2;
-
-  while ((**data != 0) && (!isspace(**data))) {
-    int val;
-    bool res = charToNibble(**data, &val);
-    if (!res) {
-      return false;
-    }
-    *addr <<= 4;
-    *addr |= val;
-    ++*data;
-  }
-
-  return true;
-}
-
-bool
-ServiceabilityAgentDbxModule::scanUnsignedInt(char** data, unsigned int* num) {
-  *num = 0;
-
-  // Skip whitespace
-  while ((**data != 0) && (isspace(**data))) {
-    ++*data;
-  }
-
-  if (**data == 0) {
-    return false;
-  }
-
-  while ((**data != 0) && (!isspace(**data))) {
-    char cur = **data;
-    if ((cur < '0') || (cur > '9')) {
-      return false;
-    }
-    *num *= 10;
-    *num += cur - '0';
-    ++*data;
-  }
-
-  return true;
-}
-
-char*
-ServiceabilityAgentDbxModule::scanSymbol(char** data) {
-  // Skip whitespace
-  while ((**data != 0) && (isspace(**data))) {
-    ++*data;
-  }
-
-  if (**data == 0) {
-    return NULL;
-  }
-
-  // First count length
-  int len = 1; // Null terminator
-  char* tmpData = *data;
-  while ((*tmpData != 0) && (!isspace(*tmpData))) {
-    ++tmpData;
-    ++len;
-  }
-  char* buf = new char[len];
-  strncpy(buf, *data, len - 1);
-  buf[len - 1] = 0;
-  *data += len - 1;
-  return buf;
-}
-
-bool
-ServiceabilityAgentDbxModule::charToNibble(char ascii, int* value) {
-  if (ascii >= '0' && ascii <= '9') {
-    *value = ascii - '0';
-    return true;
-  } else if (ascii >= 'A' && ascii <= 'F') {
-    *value = 10 + ascii - 'A';
-    return true;
-  } else if (ascii >= 'a' && ascii <= 'f') {
-    *value = 10 + ascii - 'a';
-    return true;
-  }
-
-  return false;
-}
-
-
-char*
-ServiceabilityAgentDbxModule::readCStringFromProcess(psaddr_t addr) {
-  char c;
-  int num = 0;
-  ps_prochandle* cur_proc = (ps_prochandle*) ps_get_prochandle2(1);
-
-  // Search for null terminator
-  do {
-    if (ps_pread(cur_proc, addr + num, &c, 1) != PS_OK) {
-      return NULL;
-    }
-    ++num;
-  } while (c != 0);
-
-  // Allocate string
-  char* res = new char[num];
-  if (ps_pread(cur_proc, addr, res, num) != PS_OK) {
-    delete[] res;
-    return NULL;
-  }
-  return res;
-}
-
-
-//--------------------------------------------------------------------------------
-// Class Timer
-//
-
-Timer::Timer() {
-  reset();
-}
-
-Timer::~Timer() {
-}
-
-void
-Timer::start() {
-  gettimeofday(&startTime, NULL);
-}
-
-void
-Timer::stop() {
-  struct timeval endTime;
-  gettimeofday(&endTime, NULL);
-  totalMicroseconds += timevalDiff(&startTime, &endTime);
-  ++counter;
-}
-
-long
-Timer::total() {
-  return (totalMicroseconds / 1000);
-}
-
-long
-Timer::average() {
-  return (long) ((double) total() / (double) counter);
-}
-
-void
-Timer::reset() {
-  totalMicroseconds = 0;
-  counter = 0;
-}
-
-long long
-Timer::timevalDiff(struct timeval* start, struct timeval* end) {
-  long long secs = end->tv_sec - start->tv_sec;
-  secs *= 1000000;
-  long long usecs = end->tv_usec - start->tv_usec;
-  return (secs + usecs);
-}
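
For reference, handlePeek above answers with a binary reply of the form 'B', a format byte of 1, and then one or more strides, each carrying a 4-byte big-endian length, a 1-byte mapped flag, and the stride's data when it is mapped. The standalone sketch below reproduces the same framing outside dbx; it is illustrative only and not part of this changeset, and ReadByteFn is a hypothetical stand-in for a single-byte ps_pread.

#include <stdint.h>
#include <vector>

// Hypothetical reader: returns true and stores one byte if 'addr' is
// readable in the target process (the role ps_pread plays above).
typedef bool (*ReadByteFn)(uint64_t addr, unsigned char* out);

// Append a 32-bit value in network (big-endian) byte order, as
// IOBuf::writeBinUnsignedInt does.
static void putU32BE(std::vector<unsigned char>& out, uint32_t v) {
  out.push_back((unsigned char) ((v >> 24) & 0xFF));
  out.push_back((unsigned char) ((v >> 16) & 0xFF));
  out.push_back((unsigned char) ((v >> 8) & 0xFF));
  out.push_back((unsigned char) (v & 0xFF));
}

// Encode [addr, addr + num) using the stride format handlePeek sends:
// 'B', a format byte of 1, then one or more strides, each a 4-byte
// big-endian length, a 1-byte mapped flag, and the data if mapped.
std::vector<unsigned char> encodePeekReply(uint64_t addr, uint32_t num,
                                           ReadByteFn readByte) {
  std::vector<unsigned char> out;
  out.push_back('B');
  out.push_back(1);
  if (num == 0) {
    putU32BE(out, 0);
    out.push_back(0);
    return out;
  }
  std::vector<unsigned char> strideData;
  unsigned char b = 0;
  bool lastMapped = readByte(addr, &b);
  uint32_t strideLen = 0;
  for (uint32_t i = 0; i < num; ++i) {
    unsigned char cur = 0;
    bool mapped = readByte(addr + i, &cur);
    if (mapped != lastMapped) {
      // State change: emit the previous stride before starting a new one.
      putU32BE(out, strideLen);
      out.push_back(lastMapped ? 1 : 0);
      if (lastMapped) {
        out.insert(out.end(), strideData.begin(), strideData.end());
      }
      strideData.clear();
      strideLen = 0;
      lastMapped = mapped;
    }
    if (mapped) {
      strideData.push_back(cur);
    }
    ++strideLen;
  }
  // Final stride is at least one byte long by construction.
  putU32BE(out, strideLen);
  out.push_back(lastMapped ? 1 : 0);
  if (lastMapped) {
    out.insert(out.end(), strideData.begin(), strideData.end());
  }
  return out;
}
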
--- a/agent/src/os/solaris/dbx/svc_agent_dbx.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,188 +0,0 @@
-/*
- * Copyright (c) 2000, 2001, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "shell_imp.h"
-#include "IOBuf.hpp"
-#include <sys/time.h>
-#include <thread_db.h>
-
-typedef td_err_e td_init_fn_t();
-typedef td_err_e td_ta_new_fn_t(struct ps_prochandle *, td_thragent_t **);
-typedef td_err_e td_ta_delete_fn_t(td_thragent_t *);
-typedef td_err_e td_ta_map_id2thr_fn_t(const td_thragent_t *, thread_t,  td_thrhandle_t *);
-typedef td_err_e td_thr_getgregs_fn_t(const td_thrhandle_t *, prgregset_t);
-
-class ServiceabilityAgentDbxModule {
-public:
-  ServiceabilityAgentDbxModule(int major, int minor,
-                               shell_imp_interp_t interp, int argc, char *argv[]);
-  ~ServiceabilityAgentDbxModule();
-
-  bool install();
-  bool uninstall();
-
-  /* This is invoked through the dbx command interpreter. It listens
-     on a socket for commands and does not return until it receives an
-     "exit" command. At that point control is returned to dbx's main
-     loop, at which point if the user sends an exit command to dbx's
-     shell the dbx process will exit. Returns true if completed
-     successfully, false if an error occurred while running (for
-     example, unable to bind listening socket). */
-  bool run();
-
-private:
-
-  // This must be shared between the Java and C layers
-  static const int PORT = 21928;
-
-  // Command handlers
-  bool handleAddressSize(char* data);
-  bool handlePeekFailFast(char* data);
-  bool handlePeek(char* data);
-  bool handlePoke(char* data);
-  bool handleMapped(char* data);
-  bool handleLookup(char* data);
-  bool handleThrGRegs(char* data);
-
-  // Input routines
-
-  // May mutate addr argument even if result is false
-  bool scanAddress(char** data, psaddr_t* addr);
-  // May mutate num argument even if result is false
-  bool scanUnsignedInt(char** data, unsigned int* num);
-  // Returns NULL if error occurred while scanning. Otherwise, returns
-  // newly-allocated character array which must be freed with delete[].
-  char* scanSymbol(char** data);
-  // Helper routine: converts ASCII to 4-bit integer. Returns true if
-  // character is in range, false otherwise.
-  bool charToNibble(char ascii, int* value);
-
-  // Output routines
-
-  // Writes an int with no leading or trailing spaces
-  bool writeInt(int val, int fd);
-  // Writes an address in hex format with no leading or trailing
-  // spaces
-  bool writeAddress(psaddr_t addr, int fd);
-  // Writes a register in hex format with no leading or trailing
-  // spaces (addresses and registers might be of different size)
-  bool writeRegister(prgreg_t reg, int fd);
-  // Writes a space to given file descriptor
-  bool writeSpace(int fd);
-  // Writes carriage return to given file descriptor
-  bool writeCR(int fd);
-  // Writes a bool as [0|1]
-  bool writeBoolAsInt(bool val, int fd);
-  // Helper routine: converts low 4 bits to ASCII [0..9][A..F]
-  char nibbleToChar(unsigned char nibble);
-
-  // Base routine called by most of the above
-  bool writeString(const char* str, int fd);
-
-  // Writes a binary character
-  bool writeBinChar(char val, int fd);
-  // Writes a binary unsigned int in network (big-endian) byte order
-  bool writeBinUnsignedInt(unsigned int val, int fd);
-  // Writes a binary buffer
-  bool writeBinBuf(char* buf, int size, int fd);
-
-  // Routine to flush the socket
-  bool flush(int client_socket);
-
-  void cleanup(int client_socket);
-
-  // The shell interpreter on which we can invoke commands (?)
-  shell_imp_interp_t _interp;
-
-  // The "command line" arguments passed to us by dbx (?)
-  int _argc;
-  char **_argv;
-
-  // The installed command in the dbx shell
-  shell_imp_command_t _command;
-
-  // Access to libthread_db (dlsym'ed to be able to pick up the
-  // version loaded by dbx)
-  td_init_fn_t*          td_init_fn;
-  td_ta_new_fn_t*        td_ta_new_fn;
-  td_ta_delete_fn_t*     td_ta_delete_fn;
-  td_ta_map_id2thr_fn_t* td_ta_map_id2thr_fn;
-  td_thr_getgregs_fn_t*  td_thr_getgregs_fn;
-
-  // Our "thread agent" -- access to libthread_db
-  td_thragent_t* _tdb_agent;
-
-  // Path to libthread.so in target process; free with delete[]
-  char* libThreadName;
-
-  // Handle to dlopen'ed libthread_db.so
-  void* libThreadDB;
-
-  // Helper callback for finding libthread_db.so
-  friend int findLibThreadCB(const rd_loadobj_t* lo, void* data);
-
-  // Support for reading C strings out of the target process (so we
-  // can find the correct libthread_db). Returns newly-allocated char*
-  // which must be freed with delete[], or null if the read failed.
-  char* readCStringFromProcess(psaddr_t addr);
-
-  IOBuf myComm;
-
-  // Output buffer support (used by writeString, writeChar, flush)
-  char* output_buffer;
-  int output_buffer_size;
-  int output_buffer_pos;
-
-  // "Fail fast" flag
-  bool peek_fail_fast;
-
-  // Commands
-  static const char* CMD_ADDRESS_SIZE;
-  static const char* CMD_PEEK_FAIL_FAST;
-  static const char* CMD_PEEK;
-  static const char* CMD_POKE;
-  static const char* CMD_MAPPED;
-  static const char* CMD_LOOKUP;
-  static const char* CMD_THR_GREGS;
-  static const char* CMD_EXIT;
-};
-
-// For profiling. Times reported are in milliseconds.
-class Timer {
-public:
-  Timer();
-  ~Timer();
-
-  void start();
-  void stop();
-  long total();
-  long average();
-  void reset();
-
-private:
-  struct timeval startTime;
-  long long totalMicroseconds; // stored internally in microseconds
-  int counter;
-  long long timevalDiff(struct timeval* startTime, struct timeval* endTime);
-};
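
The Timer declared above accumulates elapsed microseconds across start()/stop() pairs and reports milliseconds from total() and average(). A minimal usage sketch, assuming the class above is in scope (illustrative only, not from this changeset):

#include <unistd.h>   // usleep, just to have a measurable interval

// Drives a Timer the way the PROFILING counters above are driven:
// one start()/stop() pair around each unit of work.
void profileSomething(Timer& workTimer) {
  for (int i = 0; i < 10; ++i) {
    workTimer.start();
    usleep(1000);                       // stand-in for ps_pread or similar work
    workTimer.stop();
  }
  long totalMs = workTimer.total();     // sum of all intervals, in milliseconds
  long avgMs   = workTimer.average();   // total() divided by the number of stop() calls
  (void) totalMs;
  (void) avgMs;
}
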
--- a/agent/src/os/win32/BasicList.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef _BASIC_LIST_
-#define _BASIC_LIST_
-
-#include <vector>
-
-template<class T>
-class BasicList {
-protected:
-  typedef std::vector<T> InternalListType;
-  InternalListType internalList;
-
-public:
-  BasicList() {
-  }
-  virtual ~BasicList() {
-  }
-
-  void add(T arg) {
-    internalList.push_back(arg);
-  }
-
-  bool remove(T arg) {
-    for (InternalListType::iterator iter = internalList.begin();
-         iter != internalList.end(); iter++) {
-      if (*iter == arg) {
-        internalList.erase(iter);
-        return true;
-      }
-    }
-    return false;
-  }
-
-  int size() {
-    return internalList.size();
-  }
-
-  T get(int index) {
-    return internalList[index];
-  }
-};
-
-#endif  // #defined _BASIC_LIST_
--- a/agent/src/os/win32/Buffer.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,110 +0,0 @@
-/*
- * Copyright (c) 2001, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "Buffer.hpp"
-
-#include <string.h>
-
-Buffer::Buffer(int bufSize) {
-  buf = new char[bufSize];
-  sz = bufSize;
-  fill = 0;
-  drain = 0;
-}
-
-Buffer::~Buffer() {
-  delete[] buf;
-}
-
-char*
-Buffer::fillPos() {
-  return buf + fill;
-}
-
-int
-Buffer::remaining() {
-  return sz - fill;
-}
-
-int
-Buffer::size() {
-  return sz;
-}
-
-bool
-Buffer::incrFillPos(int amt) {
-  if (fill + amt >= sz) {
-    return false;
-  }
-  fill += amt;
-  return true;
-}
-
-int
-Buffer::readByte() {
-  if (drain < fill) {
-    return buf[drain++] & 0xFF;
-  } else {
-    return -1;
-  }
-}
-
-int
-Buffer::readBytes(char* data, int len) {
-  int numRead = 0;
-  while (numRead < len) {
-    int c = readByte();
-    if (c < 0) break;
-    data[numRead++] = (char) c;
-  }
-  return numRead;
-}
-
-char*
-Buffer::drainPos() {
-  return buf + drain;
-}
-
-int
-Buffer::drainRemaining() {
-  return fill - drain;
-}
-
-bool
-Buffer::incrDrainPos(int amt) {
-  if (drainRemaining() < amt) {
-    return false;
-  }
-  drain += amt;
-  return true;
-}
-
-void
-Buffer::compact() {
-  // Copy down data
-  memmove(buf, buf + drain, fill - drain);
-  // Adjust positions
-  fill -= drain;
-  drain = 0;
-}
--- a/agent/src/os/win32/Buffer.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2001, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef _BUFFER_
-#define _BUFFER_
-
-// A Buffer is the backing store for the IOBuf abstraction and
-// supports producer-consumer filling and draining.
-
-class Buffer {
-public:
-  Buffer(int bufSize);
-  ~Buffer();
-
-  char* fillPos();   // Position of the place where buffer should be filled
-  int   remaining(); // Number of bytes that can be placed starting at fillPos
-  int   size();      // Size of the buffer
-  // Move up fill position by amount (decreases remaining()); returns
-  // false if not enough space
-  bool  incrFillPos(int amt);
-
-  // Read single byte (0..255); returns -1 if no data available.
-  int   readByte();
-  // Read multiple bytes, non-blocking (this buffer does not define a
-  // fill mechanism), into provided buffer. Returns number of bytes read.
-  int   readBytes(char* buf, int len);
-
-  // Access to drain position. Be very careful using this.
-  char* drainPos();
-  int   drainRemaining();
-  bool  incrDrainPos(int amt);
-
-  // Compact buffer, removing already-consumed input. This must be
-  // called periodically to yield the illusion of an infinite buffer.
-  void  compact();
-
-private:
-  Buffer(const Buffer&);
-  Buffer& operator=(const Buffer&);
-
-  char* buf;
-  int   sz;
-  int   fill;
-  int   drain;
-};
-
-#endif // #defined _BUFFER_
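
The fill/drain discipline described above works as follows: a producer copies into fillPos() and commits with incrFillPos(), a consumer takes bytes with readByte()/readBytes(), and compact() is called periodically so the fixed-size buffer behaves as if unbounded. A small self-contained sketch, assuming the header above is available (illustrative only, not from this changeset):

#include <cstdio>
#include <cstring>
#include "Buffer.hpp"   // the class removed above

void bufferRoundTrip() {
  Buffer buf(64);

  // Producer side: copy data to fillPos() and advance the fill cursor.
  const char* msg = "peek 0x1234 16";
  int len = (int) strlen(msg);
  if (len <= buf.remaining()) {
    memcpy(buf.fillPos(), msg, len);
    buf.incrFillPos(len);
  }

  // Consumer side: drain one byte at a time until exhausted.
  int c;
  while ((c = buf.readByte()) >= 0) {
    putchar(c);
  }
  putchar('\n');

  // Reclaim the consumed region so the next fill starts at offset 0.
  buf.compact();
}
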
--- a/agent/src/os/win32/Dispatcher.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,115 +0,0 @@
-/*
- * Copyright (c) 2000, 2001, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include <stdio.h>
-#include <string.h>
-#include "dispatcher.hpp"
-
-const char* CMD_ASCII         = "ascii";
-const char* CMD_UNICODE       = "unicode";
-const char* CMD_PROCLIST      = "proclist";
-const char* CMD_ATTACH        = "attach";
-const char* CMD_DETACH        = "detach";
-const char* CMD_LIBINFO       = "libinfo";
-const char* CMD_PEEK          = "peek";
-const char* CMD_POKE          = "poke";
-const char* CMD_THREADLIST    = "threadlist";
-const char* CMD_DUPHANDLE     = "duphandle";
-const char* CMD_CLOSEHANDLE   = "closehandle";
-const char* CMD_GETCONTEXT    = "getcontext";
-const char* CMD_SETCONTEXT    = "setcontext";
-const char* CMD_SELECTORENTRY = "selectorentry";
-const char* CMD_SUSPEND       = "suspend";
-const char* CMD_RESUME        = "resume";
-const char* CMD_POLLEVENT     = "pollevent";
-const char* CMD_CONTINUEEVENT = "continueevent";
-const char* CMD_EXIT          = "exit";
-
-// Uncomment the #define below to get messages on stderr
-// #define DEBUGGING
-
-void
-Dispatcher::dispatch(char* cmd, Handler* handler) {
-  if (!strncmp(cmd, CMD_ASCII, strlen(CMD_ASCII))) {
-    handler->ascii(cmd + strlen(CMD_ASCII));
-
-  } else if (!strncmp(cmd, CMD_UNICODE, strlen(CMD_UNICODE))) {
-    handler->unicode(cmd + strlen(CMD_UNICODE));
-
-  } else if (!strncmp(cmd, CMD_PROCLIST, strlen(CMD_PROCLIST))) {
-    handler->procList(cmd + strlen(CMD_PROCLIST));
-
-  } else if (!strncmp(cmd, CMD_ATTACH, strlen(CMD_ATTACH))) {
-    handler->attach(cmd + strlen(CMD_ATTACH));
-
-  } else if (!strncmp(cmd, CMD_DETACH, strlen(CMD_DETACH))) {
-    handler->detach(cmd + strlen(CMD_DETACH));
-
-  } else if (!strncmp(cmd, CMD_LIBINFO, strlen(CMD_LIBINFO))) {
-    handler->libInfo(cmd + strlen(CMD_LIBINFO));
-
-  } else if (!strncmp(cmd, CMD_PEEK, strlen(CMD_PEEK))) {
-    handler->peek(cmd + strlen(CMD_PEEK));
-
-  } else if (!strncmp(cmd, CMD_POKE, strlen(CMD_POKE))) {
-    handler->poke(cmd + strlen(CMD_POKE));
-
-  } else if (!strncmp(cmd, CMD_THREADLIST, strlen(CMD_THREADLIST))) {
-    handler->threadList(cmd + strlen(CMD_THREADLIST));
-
-  } else if (!strncmp(cmd, CMD_DUPHANDLE, strlen(CMD_DUPHANDLE))) {
-    handler->dupHandle(cmd + strlen(CMD_DUPHANDLE));
-
-  } else if (!strncmp(cmd, CMD_CLOSEHANDLE, strlen(CMD_CLOSEHANDLE))) {
-    handler->closeHandle(cmd + strlen(CMD_CLOSEHANDLE));
-
-  } else if (!strncmp(cmd, CMD_GETCONTEXT, strlen(CMD_GETCONTEXT))) {
-    handler->getContext(cmd + strlen(CMD_GETCONTEXT));
-
-  } else if (!strncmp(cmd, CMD_SETCONTEXT, strlen(CMD_SETCONTEXT))) {
-    handler->setContext(cmd + strlen(CMD_SETCONTEXT));
-
-  } else if (!strncmp(cmd, CMD_SELECTORENTRY, strlen(CMD_SELECTORENTRY))) {
-    handler->selectorEntry(cmd + strlen(CMD_SELECTORENTRY));
-
-  } else if (!strncmp(cmd, CMD_SUSPEND, strlen(CMD_SUSPEND))) {
-    handler->suspend(cmd + strlen(CMD_SUSPEND));
-
-  } else if (!strncmp(cmd, CMD_RESUME, strlen(CMD_RESUME))) {
-    handler->resume(cmd + strlen(CMD_RESUME));
-
-  } else if (!strncmp(cmd, CMD_POLLEVENT, strlen(CMD_POLLEVENT))) {
-    handler->pollEvent(cmd + strlen(CMD_POLLEVENT));
-
-  } else if (!strncmp(cmd, CMD_CONTINUEEVENT, strlen(CMD_CONTINUEEVENT))) {
-    handler->continueEvent(cmd + strlen(CMD_CONTINUEEVENT));
-
-  } else if (!strcmp(cmd, CMD_EXIT)) {
-    handler->exit(cmd + strlen(CMD_EXIT));
-  }
-
-#ifdef DEBUGGING
-  else fprintf(stderr, "Ignoring illegal command \"%s\"\n", cmd);
-#endif
-}
--- a/agent/src/os/win32/Dispatcher.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef _DISPATCHER_
-#define _DISPATCHER_
-
-#include "Handler.hpp"
-
-/** This class understands the commands supported by the system and
-    calls the appropriate handler routines. */
-
-class Dispatcher {
-public:
-  static void dispatch(char* cmd, Handler* handler);
-};
-
-#endif  // #defined _DISPATCHER_
--- a/agent/src/os/win32/Handler.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2000, 2001, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef _HANDLER_
-#define _HANDLER_
-
-/** An abstract base class encapsulating the handlers for all commands
-    understood by the system. */
-class Handler {
-public:
-  virtual void ascii(char* arg)         = 0;
-  virtual void unicode(char* arg)       = 0;
-  virtual void procList(char* arg)      = 0;
-  virtual void attach(char* arg)        = 0;
-  virtual void detach(char* arg)        = 0;
-  virtual void libInfo(char* arg)       = 0;
-  virtual void peek(char* arg)          = 0;
-  virtual void poke(char* arg)          = 0;
-  virtual void threadList(char* arg)    = 0;
-  virtual void dupHandle(char* arg)     = 0;
-  virtual void closeHandle(char* arg)   = 0;
-  virtual void getContext(char* arg)    = 0;
-  virtual void setContext(char* arg)    = 0;
-  virtual void selectorEntry(char* arg) = 0;
-  virtual void suspend(char* arg)       = 0;
-  virtual void resume(char* arg)        = 0;
-  virtual void pollEvent(char* arg)     = 0;
-  virtual void continueEvent(char* arg) = 0;
-  virtual void exit(char* arg)          = 0;
-};
-
-#endif  // #defined _HANDLER_
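
Together, Dispatcher and Handler route a command line by prefix match: dispatch() strips the verb and passes the rest of the line (including any leading space) to the matching callback. A no-op Handler sketch, illustrative only and not from this changeset:

#include <cstdio>
#include "Dispatcher.hpp"
#include "Handler.hpp"

// A Handler that ignores everything except "peek", to show where the
// argument text ends up after Dispatcher::dispatch() strips the verb.
class LoggingHandler : public Handler {
public:
  void ascii(char* arg)         { }
  void unicode(char* arg)       { }
  void procList(char* arg)      { }
  void attach(char* arg)        { }
  void detach(char* arg)        { }
  void libInfo(char* arg)       { }
  void peek(char* arg)          { printf("peek args:%s\n", arg); }
  void poke(char* arg)          { }
  void threadList(char* arg)    { }
  void dupHandle(char* arg)     { }
  void closeHandle(char* arg)   { }
  void getContext(char* arg)    { }
  void setContext(char* arg)    { }
  void selectorEntry(char* arg) { }
  void suspend(char* arg)       { }
  void resume(char* arg)        { }
  void pollEvent(char* arg)     { }
  void continueEvent(char* arg) { }
  void exit(char* arg)          { }
};

int main() {
  LoggingHandler handler;
  char cmd[] = "peek 0x00401000 16";      // arg becomes " 0x00401000 16"
  Dispatcher::dispatch(cmd, &handler);
  return 0;
}
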
--- a/agent/src/os/win32/IOBuf.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,490 +0,0 @@
-/*
- * Copyright (c) 2000, 2003, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include <stdio.h>
-
-// This file is currently used for os/solaris/agent too.  At some point in time
-// the source will be reorganized to avoid these ifdefs.
-
-#ifdef __sun
-  #include <string.h>
-  #include <inttypes.h>
-  #include <sys/byteorder.h>
-#endif
-
-#include "IOBuf.hpp"
-
-// Formats for printing pointers
-#ifdef _LP64
-#  define INTPTR_FORMAT "0x%016lx"
-#else /* ! _LP64 */
-#  define INTPTR_FORMAT "0x%08lx"
-#endif /* _LP64 */
-
-// Uncomment the #define below to get messages on stderr
-// #define DEBUGGING
-
-IOBuf::IOBuf(int inLen, int outLen) {
-  inBuf = new Buffer(inLen);
-  outBuf = new Buffer(outLen);
-  fd = INVALID_SOCKET;
-  outHandle = NULL;
-  usingSocket = true;
-  reset();
-}
-
-IOBuf::~IOBuf() {
-  delete inBuf;
-  delete outBuf;
-}
-
-void
-IOBuf::setSocket(SOCKET sock) {
-  fd = sock;
-  usingSocket = true;
-}
-
-// Reading/writing files is only needed and used on windows.
-#ifdef WIN32
-void
-IOBuf::setOutputFileHandle(HANDLE handle) {
-  outHandle = handle;
-  usingSocket = false;
-}
-#endif
-
-void
-IOBuf::reset() {
-  gotDataLastTime = false;
-  state          = TEXT_STATE;
-  binPos         = 0;
-  binLength      = 0;
-}
-
-IOBuf::ReadLineResult
-IOBuf::tryReadLine() {
-  return doReadLine(false);
-}
-
-char*
-IOBuf::readLine() {
-  ReadLineResult rr = doReadLine(true);
-  if (rr != RL_GOT_DATA) {
-    return NULL;
-  }
-  return getLine();
-}
-
-IOBuf::ReadLineResult
-IOBuf::doReadLine(bool shouldWait) {
-
-  if (!usingSocket) {
-    return IOBuf::RL_ERROR;
-  }
-
-  if (gotDataLastTime) {
-    curLine.clear();
-  }
-
-  int c;
-  do {
-    c = readChar(shouldWait);
-    if (c >= 0) {
-      Action act = processChar((char) c);
-      if (act == GOT_LINE) {
-        curLine.push_back('\0');
-        gotDataLastTime = true;
-        return IOBuf::RL_GOT_DATA;
-      } else if (act == SKIP_EOL_CHAR) {
-        // Do nothing
-      } else {
-        curLine.push_back((char) c);
-      }
-    }
-  } while (shouldWait || c >= 0);
-
-  gotDataLastTime = false;
-  return IOBuf::RL_NO_DATA;
-}
-
-bool
-IOBuf::flushImpl(bool moreDataToCome) {
-  int numWritten = 0;
-
-#ifdef WIN32
-  // When running on Windows and using IOBufs for inter-process
-  // communication, we need to write metadata into the stream
-  // indicating how many bytes are coming down. Five bytes are written
-  // per flush() call, four containing the integer number of bytes
-  // coming (not including the five-byte header) and one (a 0 or 1)
-  // indicating whether there is more data coming.
-  if (!usingSocket) {
-    int numToWrite = outBuf->drainRemaining();
-    char moreToCome = (moreDataToCome ? 1 : 0);
-    DWORD numBytesWritten;
-    if (!WriteFile(outHandle, &numToWrite, sizeof(int), &numBytesWritten, NULL)) {
-      return false;
-    }
-    if (numBytesWritten != sizeof(int)) {
-      return false;
-    }
-    if (!WriteFile(outHandle, &moreToCome, 1, &numBytesWritten, NULL)) {
-      return false;
-    }
-    if (numBytesWritten != 1) {
-      return false;
-    }
-  }
-#endif
-
-  while (outBuf->drainRemaining() != 0) {
-#ifdef DEBUGGING
-      fprintf(stderr, "Flushing %d bytes\n", outBuf->drainRemaining());
-#endif
-    if (usingSocket) {
-      numWritten = send(fd, outBuf->drainPos(), outBuf->drainRemaining(), 0);
-    } else {
-#ifdef WIN32
-      DWORD numBytesWritten;
-      if (!WriteFile(outHandle, outBuf->drainPos(), outBuf->drainRemaining(), &numBytesWritten, NULL)) {
-        numWritten = -1;
-      } else {
-        numWritten = numBytesWritten;
-      }
-#endif
-    }
-    if (numWritten != -1) {
-#ifdef DEBUGGING
-      fprintf(stderr, "Flushed %d bytes\n", numWritten);
-#endif
-      outBuf->incrDrainPos(numWritten);
-    } else {
-      return false;
-    }
-  }
-
-  outBuf->compact();
-
-  return true;
-}
-
-int
-IOBuf::readChar(bool block) {
-  do {
-    int c = inBuf->readByte();
-    if (c >= 0) {
-      return c;
-    }
-    // See whether we need to compact the input buffer
-    if (inBuf->remaining() < inBuf->size() / 2) {
-      inBuf->compact();
-    }
-    // See whether socket is ready
-    fd_set fds;
-    FD_ZERO(&fds);
-    FD_SET(fd, &fds);
-    struct timeval timeout;
-    timeout.tv_sec = 0;
-    timeout.tv_usec = 0;
-    if (block || select(1 + fd, &fds, NULL, NULL, &timeout) > 0) {
-      if (block || FD_ISSET(fd, &fds)) {
-#ifdef DEBUGGING
-        int b = (block ? 1 : 0);
-        fprintf(stderr, "calling recv: block = %d\n", b);
-#endif
-        // Read data from socket
-        int numRead = recv(fd, inBuf->fillPos(), inBuf->remaining(), 0);
-        if (numRead < 0) {
-#ifdef DEBUGGING
-          fprintf(stderr, "recv failed\n");
-#endif
-          return -1;
-        }
-        inBuf->incrFillPos(numRead);
-      }
-    }
-  } while (block);
-
-  return inBuf->readByte();
-}
-
-char*
-IOBuf::getLine() {
-#ifdef DEBUGGING
-  fprintf(stderr, "Returning (first 10 chars) \"%.10s\"\n", curLine.begin());
-#endif
-  return curLine.begin();
-}
-
-bool
-IOBuf::flush() {
-  return flushImpl(false);
-}
-
-bool
-IOBuf::writeString(const char* str) {
-  int len = strlen(str);
-
-  if (len > outBuf->size()) {
-    return false;
-  }
-
-  if (len > outBuf->remaining()) {
-    if (!flushImpl(true)) {
-      return false;
-    }
-  }
-
-  // NOTE we do not copy the null terminator of the string.
-
-  strncpy(outBuf->fillPos(), str, len);
-  outBuf->incrFillPos(len);
-  return true;
-}
-
-bool
-IOBuf::writeInt(int val) {
-  char buf[128];
-  sprintf(buf, "%d", val);
-  return writeString(buf);
-}
-
-bool
-IOBuf::writeUnsignedInt(unsigned int val) {
-  char buf[128];
-  sprintf(buf, "%u", val);
-  return writeString(buf);
-}
-
-bool
-IOBuf::writeBoolAsInt(bool val) {
-  if (val) {
-    return writeString("1");
-  } else {
-    return writeString("0");
-  }
-}
-
-bool
-IOBuf::writeAddress(void* val) {
-  char buf[128];
-  sprintf(buf, INTPTR_FORMAT, val);
-  return writeString(buf);
-}
-
-bool
-IOBuf::writeSpace() {
-  return writeString(" ");
-}
-
-bool
-IOBuf::writeEOL() {
-  return writeString("\n\r");
-}
-
-bool
-IOBuf::writeBinChar(char c) {
-  return writeBinBuf((char*) &c, sizeof(c));
-}
-
-bool
-IOBuf::writeBinUnsignedShort(unsigned short i) {
-  i = htons(i);
-  return writeBinBuf((char*) &i, sizeof(i));
-}
-
-bool
-IOBuf::writeBinUnsignedInt(unsigned int i) {
-  i = htonl(i);
-  return writeBinBuf((char*) &i, sizeof(i));
-}
-
-bool
-IOBuf::writeBinBuf(char* buf, int size) {
-  while (size > 0) {
-    int spaceRemaining = outBuf->remaining();
-    if (spaceRemaining == 0) {
-      if (!flushImpl(true)) {
-        return false;
-      }
-      spaceRemaining = outBuf->remaining();
-    }
-    int toCopy = (size > spaceRemaining) ? spaceRemaining : size;
-    memcpy(outBuf->fillPos(), buf, toCopy);
-    outBuf->incrFillPos(toCopy);
-    buf += toCopy;
-    size -= toCopy;
-    if (size > 0) {
-      if (!flushImpl(true)) {
-        return false;
-      }
-    }
-  }
-  return true;
-}
-
-#ifdef WIN32
-IOBuf::FillState
-IOBuf::fillFromFileHandle(HANDLE fh, DWORD* numBytesRead) {
-  int totalToRead;
-  char moreToCome;
-
-  outBuf->compact();
-
-  DWORD numRead;
-  if (!ReadFile(fh, &totalToRead, sizeof(int), &numRead, NULL)) {
-    return FAILED;
-  }
-  if (numRead != sizeof(int)) {
-    return FAILED;
-  }
-  if (!ReadFile(fh, &moreToCome, 1, &numRead, NULL)) {
-    return FAILED;
-  }
-  if (numRead != 1) {
-    return FAILED;
-  }
-  if (outBuf->remaining() < totalToRead) {
-    return FAILED;
-  }
-
-  int tmp = totalToRead;
-
-  while (totalToRead > 0) {
-    if (!ReadFile(fh, outBuf->fillPos(), totalToRead, &numRead, NULL)) {
-      return FAILED;
-    }
-    outBuf->incrFillPos((int) numRead);
-    totalToRead -= numRead;
-  }
-
-  *numBytesRead = tmp;
-  return ((moreToCome == 0) ? DONE : MORE_DATA_PENDING);
-}
-#endif
-
-bool
-IOBuf::isBinEscapeChar(char c) {
-  return (c == '|');
-}
-
-IOBuf::Action
-IOBuf::processChar(char c) {
-  Action action = NO_ACTION;
-  switch (state) {
-  case TEXT_STATE: {
-    // Looking for text char, bin escape char, or EOL
-    if (isBinEscapeChar(c)) {
-#ifdef DEBUGGING
-      fprintf(stderr, "[a: '%c'] ", inBuf[0]);
-#endif
-      binPos = 0;
-#ifdef DEBUGGING
-      fprintf(stderr, "[b: '%c'] ", inBuf[0]);
-#endif
-      binLength = 0;
-#ifdef DEBUGGING
-      fprintf(stderr, "[c: '%c'] ", inBuf[0]);
-#endif
-      state = BIN_STATE;
-#ifdef DEBUGGING
-      fprintf(stderr, "[d: '%c'] ", inBuf[0]);
-#endif
-#ifdef DEBUGGING
-      fprintf(stderr, "\nSwitching to BIN_STATE\n");
-#endif
-    } else if (isEOL(c)) {
-      state = EOL_STATE;
-      action = GOT_LINE;
-#ifdef DEBUGGING
-      fprintf(stderr, "\nSwitching to EOL_STATE (GOT_LINE)\n");
-#endif
-    }
-#ifdef DEBUGGING
-    else {
-      fprintf(stderr, "'%c' ", c);
-      fflush(stderr);
-    }
-#endif
-    break;
-  }
-
-  case BIN_STATE: {
-    // Seeking to finish read of input
-    if (binPos < 4) {
-      int cur = c & 0xFF;
-      binLength <<= 8;
-      binLength |= cur;
-      ++binPos;
-    } else {
-#ifdef DEBUGGING
-      fprintf(stderr, "Reading binary byte %d of %d\n",
-              binPos - 4, binLength);
-#endif
-      ++binPos;
-      if (binPos == 4 + binLength) {
-        state = TEXT_STATE;
-#ifdef DEBUGGING
-        fprintf(stderr, "Switching to TEXT_STATE\n");
-#endif
-      }
-    }
-    break;
-  }
-
-  case EOL_STATE: {
-    // More EOL characters just cause us to re-enter this state
-    if (isEOL(c)) {
-      action = SKIP_EOL_CHAR;
-    } else if (isBinEscapeChar(c)) {
-      binPos = 0;
-      binLength = 0;
-      state = BIN_STATE;
-    } else {
-      state = TEXT_STATE;
-#ifdef DEBUGGING
-      fprintf(stderr, "'%c' ", c);
-      fflush(stderr);
-#endif
-    }
-    break;
-  }
-
-  } // switch
-
-  return action;
-}
-
-
-bool
-IOBuf::isEOL(char c) {
-#ifdef WIN32
-  return ((c == '\n') || (c == '\r'));
-#elif defined(__sun)
-  return c == '\n';
-#else
-  #error Please port isEOL() to your platform
-  return false;
-#endif
-}
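
The inter-process framing used by flushImpl() and fillFromFileHandle() on Windows is a five-byte header per flush: the payload length as a native-endian int (written with WriteFile of an int, unlike the big-endian binary ints inside the stream), one byte that is 1 if more data follows, then the payload. The sketch below builds and parses such a frame in memory; it is illustrative only and not from this changeset:

#include <cstring>
#include <string>

// Build one frame exactly as IOBuf::flushImpl() writes it to the pipe:
// sizeof(int) bytes of payload length in native byte order, one byte
// that is 1 if more frames follow, then the payload itself.
std::string makePipeFrame(const std::string& payload, bool moreToCome) {
  std::string frame;
  int len = (int) payload.size();
  char more = moreToCome ? 1 : 0;
  frame.append((const char*) &len, sizeof(int));
  frame.push_back(more);
  frame.append(payload);
  return frame;
}

// Parse a frame back, mirroring IOBuf::fillFromFileHandle(). Returns
// false if the buffer is too short for the advertised payload.
bool parsePipeFrame(const std::string& frame, std::string* payload, bool* more) {
  if (frame.size() < sizeof(int) + 1) return false;
  int len = 0;
  memcpy(&len, frame.data(), sizeof(int));
  *more = (frame[sizeof(int)] != 0);
  if (len < 0 || frame.size() < sizeof(int) + 1 + (size_t) len) return false;
  payload->assign(frame.data() + sizeof(int) + 1, (size_t) len);
  return true;
}
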
--- a/agent/src/os/win32/IOBuf.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,222 +0,0 @@
-/*
- * Copyright (c) 2000, 2003, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef _IO_BUF_
-#define _IO_BUF_
-
-// This file is currently used for os/solaris/agent/ too.  At some point in time
-// the source will be reorganized to avoid these ifdefs.
-// Note that this class can read/write from a file as well as a socket.  This
-// file capability is only implemented on win32.
-
-#ifdef WIN32
-  #include <winsock2.h>
-#else
-  #include <sys/types.h>
-  #include <sys/socket.h>
-  // These are from win32 winsock2.h
-  typedef unsigned int SOCKET;
-  typedef void * HANDLE;
-  typedef unsigned long DWORD;
-  #define INVALID_SOCKET (SOCKET)(~0)
-#endif
-
-#include <vector>
-#include "Buffer.hpp"
-
-/** Manages an input/output buffer pair for a socket or file handle. */
-class IOBuf {
-public:
-  IOBuf(int inBufLen, int outBufLen);
-  ~IOBuf();
-
-  enum ReadLineResult {
-    RL_GOT_DATA,
-    RL_NO_DATA,
-    RL_ERROR
-  };
-
-  /** Change the socket with which this buffer is associated */
-  void setSocket(SOCKET sock);
-
-  // Reading/writing files is only supported on windows.
-#ifdef WIN32
-  /** Change the output file handle with which this buffer is
-      associated. Currently IOBufs can not be used to read from a file
-      handle. */
-  void setOutputFileHandle(HANDLE handle);
-#endif
-
-  /** Reset the input and output buffers, without flushing the output
-      data to the socket */
-  void reset();
-
-  /** Try to read a line of data from the given socket without
-      blocking. If it was able to read a complete line of data, returns a
-      character pointer to the beginning of the (null-terminated)
-      string. If not, returns NULL, but maintains enough state that
-      subsequent calls to tryReadLine() will not ignore the data
-      already read. NOTE: this skips end-of-line characters (typically
-      CR/LF) as defined by "isEOL()". When switching back and forth
-      between binary and text modes, to be sure no data is lost, pad
-      the beginning and end of the binary transmission with bytes
-      which can not be confused with these characters. */
-  ReadLineResult tryReadLine();
-
-  /** Read a line of data from the given socket, blocking until a
-      line, including EOL, appears.  Return the line, or NULL if
-      something goes wrong. */
-  char *readLine();
-
-  /** Get the pointer to the beginning of the (null-terminated) line.
-      This should only be called if tryReadLine() has returned
-      RL_GOT_DATA. This sets the "parsing cursor" to the beginning of
-      the line. */
-  char* getLine();
-
-  // NOTE: any further data-acquisition routines must ALWAYS call
-  // fixupData() at the beginning!
-
-  //----------------------------------------------------------------------
-  // Output routines
-  //
-
-  /** Flush the output buffer to the socket. Returns true if
-      succeeded, false if write error occurred. */
-  bool flush();
-
-  /** Write the given string to the output buffer. May flush if output
-      buffer becomes too full to store the data. Not guaranteed to
-      work if string is longer than the size of the output buffer.
-      Does not include the null terminator of the string. Returns true
-      if succeeded, false if write error occurred. */
-  bool writeString(const char* str);
-
-  /** Write the given int to the output buffer. May flush if output
-      buffer becomes too full to store the data. Returns true if
-      succeeded, false if write error occurred. */
-  bool writeInt(int val);
-
-  /** Write the given unsigned int to the output buffer. May flush if
-      output buffer becomes too full to store the data. Returns true
-      if succeeded, false if write error occurred. */
-  bool writeUnsignedInt(unsigned int val);
-
-  /** Write the given boolean to the output buffer. May flush if
-      output buffer becomes too full to store the data. Returns true
-      if succeeded, false if write error occurred. */
-  bool writeBoolAsInt(bool val);
-
-  /** Write the given address to the output buffer. May flush if
-      output buffer becomes too full to store the data. Returns true
-      if succeeded, false if write error occurred. */
-  bool writeAddress(void* val);
-
-  /** Writes a space to the output buffer. May flush if output buffer
-      becomes too full to store the data. Returns true if succeeded,
-      false if write error occurred. */
-  bool writeSpace();
-
-  /** Writes an end-of-line sequence to the output buffer. May flush
-      if output buffer becomes too full to store the data. Returns
-      true if succeeded, false if write error occurred. */
-  bool writeEOL();
-
-  /** Writes a binary character to the output buffer. */
-  bool writeBinChar(char c);
-
-  /** Writes a binary unsigned short in network (big-endian) byte
-      order to the output buffer. */
-  bool writeBinUnsignedShort(unsigned short i);
-
-  /** Writes a binary unsigned int in network (big-endian) byte order
-      to the output buffer. */
-  bool writeBinUnsignedInt(unsigned int i);
-
-  /** Writes a binary buffer to the output buffer. */
-  bool writeBinBuf(char* buf, int size);
-
-#ifdef WIN32
-  enum FillState {
-    DONE = 1,
-    MORE_DATA_PENDING = 2,
-    FAILED = 3
-  };
-
-  /** Very specialized routine; fill the output buffer from the given
-      file handle. Caller is responsible for ensuring that there is
-      data to be read on the file handle. */
-  FillState fillFromFileHandle(HANDLE fh, DWORD* numRead);
-#endif
-
-  /** Binary utility routine (for poke) */
-  static bool isBinEscapeChar(char c);
-
-private:
-  IOBuf(const IOBuf&);
-  IOBuf& operator=(const IOBuf&);
-
-  // Returns -1 if non-blocking and no data available
-  int readChar(bool block);
-  // Line-oriented reading
-  std::vector<char> curLine;
-  bool gotDataLastTime;
-
-  ReadLineResult doReadLine(bool);
-
-  bool flushImpl(bool moreDataToCome);
-
-  SOCKET fd;
-  HANDLE outHandle;
-  bool usingSocket;
-
-  // Buffers
-  Buffer* inBuf;
-  Buffer* outBuf;
-
-  // Simple finite-state machine to handle binary data
-  enum State {
-    TEXT_STATE,
-    BIN_STATE,
-    EOL_STATE
-  };
-  enum Action {
-    NO_ACTION,
-    GOT_LINE,     // TEXT_STATE -> EOL_STATE transition
-    SKIP_EOL_CHAR // EOL_STATE -> EOL_STATE transition
-  };
-
-  State state;
-  Action processChar(char c);
-
-  // Handling incoming binary buffers (poke command)
-  int   binPos;    // Number of binary characters read so far;
-                   // total number to read is binLength + 4
-  int   binLength; // Number of binary characters in message;
-                   // not valid until binPos >= 4
-
-  bool isEOL(char c);
-};
-
-#endif  // #defined _IO_BUF_
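
On the wire, the binary escape recognized by processChar() is the character '|' followed by a 4-byte length, most significant byte first, and then that many raw bytes, after which the stream returns to text mode. A hypothetical helper for composing such a chunk (a sketch, not code from this changeset):

#include <stdint.h>
#include <string>

// Frame 'size' raw bytes the way IOBuf's reader expects them: the
// escape character '|' (isBinEscapeChar), then the length as four
// bytes, most significant first, then the bytes themselves.
std::string escapeBinaryChunk(const char* data, uint32_t size) {
  std::string out;
  out.push_back('|');
  out.push_back((char) ((size >> 24) & 0xFF));
  out.push_back((char) ((size >> 16) & 0xFF));
  out.push_back((char) ((size >> 8) & 0xFF));
  out.push_back((char) (size & 0xFF));
  out.append(data, size);
  return out;
}
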
--- a/agent/src/os/win32/LockableList.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef _LOCKABLE_LIST_
-#define _LOCKABLE_LIST_
-
-#include <windows.h>
-#include "BasicList.hpp"
-
-template<class T>
-class LockableList : public BasicList<T> {
-private:
-  CRITICAL_SECTION crit;
-
-public:
-  LockableList() {
-    InitializeCriticalSection(&crit);
-  }
-
-  ~LockableList() {
-    DeleteCriticalSection(&crit);
-  }
-
-  void lock() {
-    EnterCriticalSection(&crit);
-  }
-
-  void unlock() {
-    LeaveCriticalSection(&crit);
-  }
-};
-
-#endif  // #defined _LOCKABLE_LIST_
--- a/agent/src/os/win32/Makefile	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,80 +0,0 @@
-#
-# Copyright (c) 2000, 2001, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#  
-#
-
-SERVER=SwDbgSrv.exe
-SUBPROCESS=SwDbgSub.exe
-
-SERVER_SOURCES =   \
-  Buffer.cpp       \
-  Dispatcher.cpp   \
-  initWinsock.cpp  \
-  IOBuf.cpp        \
-  ioUtils.cpp      \
-  isNT4.cpp        \
-  nt4internals.cpp \
-  procList.cpp     \
-  Reaper.cpp       \
-  SwDbgSrv.cpp     \
-  serverLists.cpp  \
-  toolHelp.cpp
-
-SUBPROCESS_SOURCES = \
-  SwDbgSub.cpp       \
-  Buffer.cpp         \
-  IOBuf.cpp          \
-  isNT4.cpp          \
-  libInfo.cpp        \
-  Monitor.cpp        \
-  nt4internals.cpp   \
-  toolHelp.cpp
-
-SERVER_OBJS     = $(SERVER_SOURCES:.cpp=.obj)
-SUBPROCESS_OBJS = $(SUBPROCESS_SOURCES:.cpp=.obj)
-
-CPP=cl.exe
-LINK32=link.exe
-
-# These do not need to be optimized (don't run a lot of code) and it
-# will be useful to have the assertion checks in place
-
-CFLAGS=/nologo /MD /W3 /Gm /GX /ZI /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /GZ /c
-
-LIBS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib          \
-     ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib kernel32.lib user32.lib gdi32.lib     \
-     winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib \
-     odbccp32.lib ws2_32.lib  /nologo /subsystem:console /debug /machine:I386
-
-default: $(SERVER) $(SUBPROCESS)
-
-$(SERVER): $(SERVER_OBJS)
-	$(LINK32) /out:$@ $(SERVER_OBJS) $(LIBS)
-
-$(SUBPROCESS): $(SUBPROCESS_OBJS)
-	$(LINK32) /out:$@ $(SUBPROCESS_OBJS) $(LIBS)
-
-clean:
-	rm -f *.obj *.idb *.pch *.pdb *.ncb *.opt *.plg *.exe *.ilk
-
-.cpp.obj:
-	@ $(CPP) $(CFLAGS) /o $@ $<
--- a/agent/src/os/win32/Message.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,123 +0,0 @@
-/*
- * Copyright (c) 2000, 2001, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef _MESSAGE_
-#define _MESSAGE_
-
-// These are the commands sent from the server to the child processes
-// over the child processes' stdin pipes. A subset of the commands
-// understood by the overall system, these require responses from the
-// child process. Having a data structure rather than sending text
-// simplifies parsing on the child side. The child replies by sending
-// back fully-formatted replies which are copied by the server process
-// to the clients' sockets.
-
-struct PeekArg {
-  DWORD address;
-  DWORD numBytes;
-};
-
-// NOTE: when sending a PokeArg to the child process, we handle the
-// buffer specially
-struct PokeArg {
-  DWORD address;
-  DWORD numBytes;
-  void* data;
-};
-
-// Used for continueevent
-struct BoolArg {
-  bool val;
-};
-
-// Used for duphandle, closehandle, and getcontext
-struct HandleArg {
-  HANDLE handle;
-};
-
-// Used for setcontext
-const int NUM_REGS_IN_CONTEXT = 22;
-struct SetContextArg {
-  HANDLE handle;
-  DWORD  Eax;
-  DWORD  Ebx;
-  DWORD  Ecx;
-  DWORD  Edx;
-  DWORD  Esi;
-  DWORD  Edi;
-  DWORD  Ebp;
-  DWORD  Esp;
-  DWORD  Eip;
-  DWORD  Ds;
-  DWORD  Es;
-  DWORD  Fs;
-  DWORD  Gs;
-  DWORD  Cs;
-  DWORD  Ss;
-  DWORD  EFlags;
-  DWORD  Dr0;
-  DWORD  Dr1;
-  DWORD  Dr2;
-  DWORD  Dr3;
-  DWORD  Dr6;
-  DWORD  Dr7;
-};
-
-// Used for selectorentry
-struct SelectorEntryArg {
-  HANDLE handle;
-  DWORD  selector;
-};
-
-struct Message {
-  typedef enum {
-    ATTACH,
-    DETACH,
-    LIBINFO,
-    PEEK,
-    POKE,
-    THREADLIST,
-    DUPHANDLE,
-    CLOSEHANDLE,
-    GETCONTEXT,
-    SETCONTEXT,
-    SELECTORENTRY,
-    SUSPEND,
-    RESUME,
-    POLLEVENT,
-    CONTINUEEVENT
-  } Type;
-
-  Type type;
-  union {
-    PeekArg          peekArg;
-    PokeArg          pokeArg;
-    BoolArg          boolArg;
-    HandleArg        handleArg;
-    SetContextArg    setContextArg;
-    SelectorEntryArg selectorArg;
-  };
-};
-
-#endif  // _MESSAGE_
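
For reference, a minimal sketch of how one of these fixed-size Message structs is filled in and written to a child process's stdin pipe (this mirrors the sendMessage routine in SwDbgSrv.cpp below; the writePeekRequest helper name is hypothetical and not part of the original sources):

    // Sketch only: assumes the Message definitions above and <windows.h>.
    static bool writePeekRequest(HANDLE childStdin, DWORD address, DWORD numBytes) {
      Message msg;
      msg.type = Message::PEEK;
      msg.peekArg.address  = address;
      msg.peekArg.numBytes = numBytes;

      // The struct is sent as raw bytes. A POKE request additionally appends
      // its data buffer after the struct, which is why PokeArg carries a
      // pointer rather than the bytes themselves.
      DWORD written = 0;
      return WriteFile(childStdin, &msg, sizeof(Message), &written, NULL)
          && written == sizeof(Message);
    }
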
--- a/agent/src/os/win32/Monitor.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,176 +0,0 @@
-/*
- * Copyright (c) 2001, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include <stdio.h>
-#include <assert.h>
-#include "Monitor.hpp"
-
-Monitor::Monitor() {
-  _lock_count = -1;       // No threads have entered the critical section
-  _owner = NULL;
-  _lock_event = CreateEvent(NULL, false, false, NULL);
-  _wait_event = CreateEvent(NULL, true, false, NULL);
-  _counter = 0;
-  _tickets = 0;
-  _waiters = 0;
-}
-
-Monitor::~Monitor() {
-  assert(_owner == NULL);    // Otherwise, owned monitor being deleted
-  assert(_lock_count == -1); // Otherwise, monitor being deleted with non -1 lock count
-  CloseHandle(_lock_event);
-  CloseHandle(_wait_event);
-}
-
-void
-Monitor::lock() {
-  if (InterlockedIncrement(&_lock_count) == 0) {
-    // Success, we now own the lock
-  } else {
-    DWORD dwRet = WaitForSingleObject((HANDLE)_lock_event,  INFINITE);
-    assert(dwRet == WAIT_OBJECT_0); // Unexpected return value from WaitForSingleObject
-  }
-  assert(owner() == NULL); // Otherwise, lock count and owner are inconsistent
-  setOwner(GetCurrentThread());
-}
-
-void
-Monitor::unlock() {
-  setOwner(NULL);
-  if (InterlockedDecrement(&_lock_count) >= 0) {
-    // Wake a waiting thread up
-    DWORD dwRet = SetEvent(_lock_event);
-    assert(dwRet != 0); // Unexpected return value from SetEvent
-  }
-}
-
-bool
-Monitor::wait(long timeout) {
-  assert(owner() != NULL);
-  assert(owner() == GetCurrentThread());
-
-  // 0 means forever. Convert to Windows specific code.
-  DWORD timeout_value = (timeout == 0) ? INFINITE : timeout;
-  DWORD which;
-
-  long c = _counter;
-  bool retry = false;
-
-  _waiters++;
-  // Loop until condition variable is signaled.  The event object is
-  // set whenever the condition variable is signaled, and tickets will
-  // reflect the number of threads which have been notified. The counter
-  // field is used to make sure we don't respond to notifications that
-  // have occurred *before* we started waiting, and is incremented each
-  // time the condition variable is signaled.
-
-  while (true) {
-
-    // Leave critical region
-    unlock();
-
-    // If this is a retry, let other low-priority threads have a chance
-    // to run.  Make sure that we sleep outside of the critical section.
-    if (retry) {
-      Sleep(1);
-    } else {
-      retry = true;
-    }
-
-    which = WaitForSingleObject(_wait_event, timeout_value);
-    // Enter critical section
-    lock();
-
-    if (_tickets != 0 && _counter != c) break;
-
-    if (which == WAIT_TIMEOUT) {
-      --_waiters;
-      return true;
-    }
-  }
-  _waiters--;
-
-  // If this was the last thread to be notified, then we need to reset
-  // the event object.
-  if (--_tickets == 0) {
-    ResetEvent(_wait_event);
-  }
-
-  return false;
-}
-
-// Notify a single thread waiting on this monitor
-bool
-Monitor::notify() {
-  assert(ownedBySelf()); // Otherwise, notify on unknown thread
-
-  if (_waiters > _tickets) {
-    if (!SetEvent(_wait_event)) {
-      return false;
-    }
-    _tickets++;
-    _counter++;
-  }
-
-  return true;
-}
-
-// Notify all threads waiting on this monitor
-bool
-Monitor::notifyAll() {
-  assert(ownedBySelf()); // Otherwise, notifyAll on unknown thread
-
-  if (_waiters > 0) {
-    if (!SetEvent(_wait_event)) {
-      return false;
-    }
-    _tickets = _waiters;
-    _counter++;
-  }
-
-  return true;
-}
-
-HANDLE
-Monitor::owner() {
-  return _owner;
-}
-
-void
-Monitor::setOwner(HANDLE owner) {
-  if (owner != NULL) {
-    assert(_owner == NULL);                 // Setting owner thread of already owned monitor
-    assert(owner == GetCurrentThread());    // Else should not be doing this
-  } else {
-    HANDLE oldOwner = _owner;
-    assert(oldOwner != NULL);               // Removing the owner thread of an unowned mutex
-    assert(oldOwner == GetCurrentThread());
-  }
-  _owner = owner;
-}
-
-bool
-Monitor::ownedBySelf() {
-  return (_owner == GetCurrentThread());
-}
--- a/agent/src/os/win32/Monitor.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2001, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef _MONITOR_
-#define _MONITOR_
-
-#include <windows.h>
-
-class Monitor {
-public:
-  Monitor();
-  ~Monitor();
-
-  void lock();
-  void unlock();
-  // Default time is forever (i.e., zero). Returns true if it times out, otherwise
-  // false.
-  bool wait(long timeout = 0);
-  bool notify();
-  bool notifyAll();
-
-private:
-  HANDLE owner();
-  void setOwner(HANDLE owner);
-  bool ownedBySelf();
-
-  HANDLE _owner;
-  long   _lock_count;
-  HANDLE _lock_event;   // Auto-reset event for blocking in lock()
-  HANDLE _wait_event;   // Manual-reset event for notifications
-  long _counter;        // Current number of notifications
-  long _waiters;        // Number of threads waiting for notification
-  long _tickets;        // Number of waiters to be notified
-};
-
-
-#endif  // _MONITOR_
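
A minimal usage sketch for this Monitor, following the usual condition-variable pattern (the producer/consumer functions and the dataReady flag are hypothetical and not part of the original sources):

    static Monitor queueMonitor;
    static bool    dataReady = false;

    void consumerThread() {
      queueMonitor.lock();
      while (!dataReady) {
        // wait() must be called with the monitor held; it releases the lock
        // while blocked and reacquires it before returning. A timeout of 0
        // means wait forever; a non-zero timeout is in milliseconds and
        // wait() returns true on timeout.
        queueMonitor.wait();
      }
      dataReady = false;
      queueMonitor.unlock();
    }

    void producerThread() {
      queueMonitor.lock();
      dataReady = true;
      queueMonitor.notify();   // notifyAll() would release every waiter
      queueMonitor.unlock();
    }
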
--- a/agent/src/os/win32/README-commands.txt	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,246 +0,0 @@
-This debug server uses a largely text-based protocol, except for
-certain bulk data transfer operations. All text is in single-byte
-US-ASCII except for the strings returned in "proclist".
-
-NOTE that the character '|' (vertical bar) is used as an escape
-character to switch the incoming data stream to the debug server into
-binary mode, so no text command may contain that character.
-
-Commands understood:
-
-ascii <EOL>                 ::=
-
-    Changes to ASCII mode. This affects all outgoing strings. At
-    startup the system is in unicode mode.
-
-unicode <EOL>               ::=
-
-    Changes to UNICODE mode. This affects all outgoing strings. This
-    is the default mode upon startup.
-
-proclist <EOL>              ::=
-      <int num> [<unsigned int pid> <int charSize> <int numChars> [<binary char_t name>]...]... <EOL>
-
-    Returns integer indicating number of processes to follow, followed
-    by (pid, name) pairs. Names are given by (charSize, numChars,
-    [char_t]...) tuples; charSize indicates the size of each character
-    in bytes, numChars the number of characters in the string, and
-    name the raw data for the string. Each individual character of the
-    string, if multi-byte, is transmitted in network byte order.
-    numChars and name are guaranteed to be separated by precisely one
-    US-ASCII space. If process list is not available because of
-    limitations of the underlying operating system, number of
-    processes returned is 0.
-
-attach <int pid> <EOL>      ::= <bool result> <EOL>
-
-    Attempts to attach to the specified process. Returns 1 if
-    successful, 0 if not. Will fail if already attached or if the
-    process ID does not exist. Attaching to a process causes the
-    process to be suspended.
-
-detach <EOL>                ::= <bool result> <EOL>
-
-    Detaches from the given process. Attaching and detaching multiple
-    times during a debugging session is allowed. Detaching causes the
-    process to resume execution.
-
-libinfo <EOL>               ::=
-      <int numLibs> [<int charSize> <int numChars> [<binary char_t name>]... <address baseAddr>]... <EOL>
-
-    May only be called once attached and the target process must be
-    suspended; otherwise, returns 0. Returns list of the full path
-    names of all of the loaded modules (including the executable
-    image) in the target process, as well as the base address at which
-    each module was relocated. See proclist for format of strings, but
-    NOTE that charSize is ALWAYS 1 for this particular routine,
-    regardless of the setting of ASCII/UNICODE.
-
-peek <address addr> <unsigned int numBytes> <EOL> ::=
-     B<binary char success>
-      [<binary unsigned int len> <binary char isMapped> [<binary char data>]...]...
-
-    NOTE that the binary portion of this message is prefixed by the
-    uppercase US-ASCII letter 'B', allowing easier synchronization by
-    clients. There is no data between the 'B' and the rest of the
-    message.
-
-    May only be called once attached. Reads the address space of the
-    target process starting at the given address (see below for format
-    specifications) and extending the given number of bytes. Whether
-    the read succeeded is indicated by a single byte containing a 1 or
-    0 (success or failure). If successful, the return result is given
-    in a sequence of ranges. _len_, the length of each range, is
-    indicated by a 32-bit unsigned integer transmitted with big-endian
-    byte ordering (i.e., most significant byte first).  _isMapped_
-    indicates whether the range is mapped or unmapped in the target
-    process's address space, and will contain the value 1 or 0 for
-    mapped or unmapped, respectively. If the range is mapped,
-    _isMapped_ is followed by _data_, containing the raw binary data
-    for the range. The sum of all ranges' lengths is guaranteed to be
-    equivalent to the number of bytes requested.
-
-poke <address addr> |[<binary unsigned int len> [<binary char data>]] <EOL> ::=
-     <bool result> <EOL>
-
-    NOTE that the binary portion of this message is prefixed by the
-     US-ASCII character '|' (vertical bar), allowing easier
-    synchronization by the server. There is no data between the '|'
-    and the rest of the message. ('B' is not used here because
-    addresses can contain that letter; no alphanumeric characters are
-    used because some of the parsing routines are used by the Solaris
-    SA port, and in that port any alphanumeric character can show up
-    as a part of a symbol being looked up.)
-
-    May only be called once attached. Writes the address space of the
-    target process starting at the given address (see below for format
-    specifications), extending the given number of bytes, and
-    containing the given data. The number of bytes is a 32-bit
-    unsigned integer transmitted with big-endian byte ordering (i.e.,
-    most significant byte first). This is followed by the raw binary
-    data to be placed at that address. The number of bytes of data
-    must match the number of bytes specified in the message.
-
-    Returns true if the write succeeded; false if it failed, for
-    example because a portion of the region was not mapped in the
-    target address space.
-
-threadlist <EOL>            ::= <int numThreads> [<address threadHandle>...] <EOL>
-
-    May only be called once attached and the target process must be
-    suspended; otherwise, returns 0. If available, returns handles for
-    all of the threads in the target process. These handles may be
-    used as arguments to the getcontext and selectorentry
-    commands. They do not need to be (and should not be) duplicated
-    via the duphandle command and must not be closed via the
-    closehandle command.
-
-duphandle <address handle> <EOL> ::=
-    <bool success> [<address duplicate>] <EOL>
-
-    Duplicates a HANDLE read from the target process's address space.
-    HANDLE is a Windows construct (typically typedef'd to void *).
-    The returned handle should ultimately be closed via the
-    closehandle command; failing to do so can cause resource leaks.
-
-    The purpose of this command is to allow the debugger to read the
-    value of a thread handle from the target process and query its
-    register set and thread selector entries via the getcontext and
-    selectorentry commands, below; such use implies that the target
-    program has its own notion of the thread list, and further, that
-    the debugger has a way of locating that thread list.
-
-closehandle <address handle> <EOL> ::=
-
-    Closes a handle retrieved via the duphandle command, above.
-
-getcontext <address threadHandle> <EOL> ::= <bool success> [<context>] <EOL>
-    
-    Returns the context for the given thread. The handle must either
-    be one of the handles returned from the threadlist command or the
-    result of duplicating a thread handle out of the target process
-    via the duphandle command. The target process must be suspended.
-
-    The context is returned as a series of hex values which represent
-    the following x86 registers in the following order:
-      EAX, EBX, ECX, EDX, ESI, EDI, EBP, ESP, EIP, DS, ES, FS, GS,
-      CS, SS, EFLAGS, DR0, DR1, DR2, DR3, DR6, DR7
-
-    FIXME: needs to be generalized and/or specified for other
-    architectures.
-
-setcontext <address threadHandle> <context> ::= <bool success> <EOL>
-
-    Sets the context of the given thread. The target process must be
-    suspended. See the getcontext command for the ordering of the
-    registers in the context.
-
-    Even if the setcontext command succeeds, some of the bits in some
-    of the registers (like the global enable bits in the debug
-    registers) may be overridden by the operating system. To ensure
-    the debugger's notion of the register set is up to date, it is
-    recommended to follow up a setcontext with a getcontext.
-
-selectorentry <address threadHandle> <int selector> <EOL> ::=
-    <bool success>
-    [<address limitLow> <address baseLow>
-     <address baseMid>  <address flags1>
-     <address flags2>   <address baseHi>] <EOL>
-
-    Retrieves a descriptor table entry for the given thread and
-    selector. This data structure allows conversion of a
-    segment-relative address to a linear virtual address. It is most
-    useful for locating the Thread Information Block for a given
-    thread handle to be able to find that thread's ID, to be able to
-    understand whether two different thread handles in fact refer to
-    the same underlying thread.
-
-    This command will only work on the X86 architecture and will
-    return false for the success flag (with no additional information
-    sent) on other architectures.
-
-suspend                     ::=
-
-    Suspends the target process. Must be attached to a target process.
-    A process is suspended when attached to via the attach command. If
-    the target process is already suspended then this command has no
-    effect.
-
-resume                      ::=
-
-    Resumes the target process without detaching from it. Must be
-    attached to a target process. After resuming a target process, the
-    debugger client must be prepared to poll for events from the
-    target process fairly frequently in order for execution in the
-    target process to proceed normally. If the target process is
-    already resumed then this command has no effect.
-
-pollevent                   ::=
-    <bool eventPresent> [<address threadHandle> <unsigned int eventCode>]
-
-  Additional entries in result for given eventCode:
-
-    LOAD/UNLOAD_DLL_DEBUG_EVENT: <address baseOfDLL>
-    EXCEPTION_DEBUG_EVENT:       <unsigned int exceptionCode> <address faultingPC>
-
-      Additional entries for given exceptionCode:
-
-         EXCEPTION_ACCESS_VIOLATION: <bool wasWrite> <address faultingAddress>
-
-    <EOL>
-
-    Polls once to see whether a debug event has been generated by the
-    target process. If none is present, returns 0 immediately.
-    Otherwise, returns 1 along with a series of textual information
-    about the event. The event is not cleared, and the thread resumed,
-    until the continueevent command is sent, or the debugger client
-    detaches from the target process.
-
-    Typically a debugger client will suspend the target process upon
-    reception of a debug event. Otherwise, it is not guaranteed that
-    all threads will be suspended upon reception of a debug event, and
-    any operations requiring that threads be suspended (including
-    fetching the context for the thread which generated the event)
-    will fail.
-
-continueevent <bool passEventToClient> ::= <bool success> <EOL>
-
-    Indicates that the current debug event has been used by the
-    debugger client and that the target process should be resumed. The
-    passEventToClient flag indicates whether the event should be
-    propagated to the target process. Breakpoint and single-step
-    events should not be propagated to the target. Returns false if
-    there was no pending event, true otherwise.
-
-exit <EOL>
-
-    Exits this debugger session.
-
-Format specifications:
-
-// Data formats and example values:
-<EOL>          ::=   end of line (typically \n on Unix platforms, or \r\n on Windows)
-<address>      ::=   0x12345678[9ABCDEF0] /* up to 64-bit hex value */
-<unsigned int> ::=   5                    /* up to 32-bit integer number; no leading sign */
-<bool>         ::=   1                    /* ASCII '0' or '1' */
-<context>      ::=   <address> ...
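
Putting the command set above together, a minimal client sketch (illustrative only, not part of the original sources) that connects to the server on localhost, attaches to a pid and detaches again could look like the following; the port value 27000 is the one documented in README.txt below, and error handling is omitted for brevity:

    #include <winsock2.h>
    #include <stdio.h>
    #include <string.h>

    int main(int argc, char** argv) {
      if (argc < 2) return 1;                  // usage: client <pid>

      WSADATA wsa;
      WSAStartup(MAKEWORD(2, 2), &wsa);

      SOCKET s = socket(AF_INET, SOCK_STREAM, 0);
      struct sockaddr_in addr;
      memset(&addr, 0, sizeof(addr));
      addr.sin_family      = AF_INET;
      addr.sin_port        = htons(27000);
      addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
      connect(s, (struct sockaddr*) &addr, sizeof(addr));

      // "attach <pid>" answers with "0" or "1" followed by an end-of-line.
      char cmd[64];
      sprintf(cmd, "attach %s\n", argv[1]);
      send(s, cmd, (int) strlen(cmd), 0);

      char reply[128];
      int n = recv(s, reply, sizeof(reply) - 1, 0);
      if (n > 0) {
        reply[n] = '\0';
        printf("attach reply: %s", reply);
      }

      send(s, "detach\n", 7, 0);
      send(s, "exit\n", 5, 0);
      closesocket(s);
      WSACleanup();
      return 0;
    }

Such a client links against ws2_32.lib, which the Makefile above already lists for the server.
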
--- a/agent/src/os/win32/README.txt	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,64 +0,0 @@
-This is a "Simple Windows Debug Server" written for the purpose of
-enabling the Serviceability Agent on Win32. It has backends both for
-Windows NT 4.0 (using internal Windows APIs for a few routines) and
-for 95/98/ME/2000 via the Tool Help APIs.
-
-The reason this debug server is necessary is that the Win32 debug APIs
-by design tear down the target process when the debugger exits (see
-knowledge base article Q164205 on msdn.microsoft.com). On Solaris, one
-can attach to and detach from a process with no effect; this is key to
-allowing dbx and gcore to work.
-
-The Simple Windows Debug Server effectively implements attach/detach
-functionality for arbitrary debug clients. This allows the SA to
-attach non-destructively to a process, and will enable gcore for Win32
-to be written shortly. While the debugger (the "client" in all of the
-source code) is attached, the target process is suspended. (Note that
-the debug server could be extended to support resumption of the target
-process and transmission of debug events over to the debugger, but
-this has been left for the future.)
-
-The makefile (type "nmake") builds two executables: SwDbgSrv.exe,
-which is the server process, and SwDbgSub.exe, which is forked by the
-server and should not be directly invoked by the user.
-
-The intent is that these two executables can be installed into
-C:\WINNT\SYSTEM32 and SwDbgSrv installed to run as a service (on NT),
-for example using ServiceInstaller (http://www.kcmultimedia.com/smaster/). 
-However, SwDbgSrv can also be run from the command line. It generates
-no text output unless the source code is changed to enable debugging
-printouts. As long as any processes which have been attached to by the
-SA are alive, the SwDbgSrv and any forked SwDbgSub processes must be
-left running. Terminating them will cause termination of the target
-processes.
-
-The debug server opens port 27000 and accepts incoming connections
-from localhost only. The security model assumes that if one can run a
-process on the given machine then one basically has access to most or
-all of the machine's facilities; this seems to be in line with the
-standard Windows security model. The protocol used is text-based, so
-one can debug the debug server using telnet. See README-commands.txt
-for documentation on the supported commands.
-
-Testing indicates that the performance impact of attaching to a
-process (and therefore permanently attaching a debugger) is minimal.
-Some serious performance problems were seen earlier, but these
-ultimately turned out to be caused by a lack of physical memory on
-the machine running the system.
-
-Bugs:
-
-This debug server is fundamentally incompatible with the Visual C++
-debugger. Once the debug server is used to attach to a process, the
-Visual C++ IDE will not be able to attach to the same process (even if
-the debug server is "detached" from that process). Note that this
-system is designed to work with the same primitives that C and C++
-debuggers use (like "symbol lookup" and "read from process memory")
-and exposes these primitives to Java, so in the long term we could
-solve this problem by implementing platform-specific debug symbol
-parsing and a platform-independent C++ debugger in Java.
-
-Note:
-
-The files IOBuf.cpp and IOBuf.hpp are also used in 
-building src/os/solaris/agent.
--- a/agent/src/os/win32/Reaper.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,159 +0,0 @@
-/*
- * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include <iostream>
-#include "Reaper.hpp"
-
-using namespace std;
-
-Reaper::Reaper(ReaperCB* cb) {
-  InitializeCriticalSection(&crit);
-  event = CreateEvent(NULL, TRUE, FALSE, NULL);
-  this->cb = cb;
-
-  active = false;
-  shouldShutDown = false;
-}
-
-bool
-Reaper::start() {
-  bool result = false;
-
-  EnterCriticalSection(&crit);
-
-  if (!active) {
-    DWORD id;
-    HANDLE reaper = CreateThread(NULL, 0, &Reaper::reaperThreadEntry,
-                                 this, 0, &id);
-    if (reaper != NULL) {
-      result = true;
-    }
-  }
-
-  LeaveCriticalSection(&crit);
-
-  return result;
-}
-
-bool
-Reaper::stop() {
-  bool result = false;
-
-  EnterCriticalSection(&crit);
-
-  if (active) {
-    shouldShutDown = true;
-    SetEvent(event);
-    while (active) {
-      Sleep(1);
-    }
-    shouldShutDown = false;
-    result = true;
-  }
-
-  LeaveCriticalSection(&crit);
-
-  return result;
-}
-
-void
-Reaper::registerProcess(HANDLE processHandle, void* userData) {
-  ProcessInfo info;
-
-  info.handle = processHandle;
-  info.userData = userData;
-
-  EnterCriticalSection(&crit);
-
-  procInfo.push_back(info);
-  SetEvent(event);
-
-  LeaveCriticalSection(&crit);
-}
-
-void
-Reaper::reaperThread() {
-  while (!shouldShutDown) {
-    // Take atomic snapshot of the current process list and user data
-    EnterCriticalSection(&crit);
-
-    int num = procInfo.size();
-    HANDLE* handleList = new HANDLE[1 + num];
-    void**  dataList   = new void*[num];
-    for (int i = 0; i < num; i++) {
-      handleList[i] = procInfo[i].handle;
-      dataList[i]   = procInfo[i].userData;
-    }
-
-    LeaveCriticalSection(&crit);
-
-    // Topmost handle becomes the event object, so other threads can
-    // signal this one to notice differences in the above list (or
-    // shut down)
-    handleList[num] = event;
-
-    // Wait for these objects
-    DWORD idx = WaitForMultipleObjects(1 + num, handleList,
-                                       FALSE, INFINITE);
-    if ((idx >= WAIT_OBJECT_0) && (idx <= WAIT_OBJECT_0 + num)) {
-      idx -= WAIT_OBJECT_0;
-      if (idx < num) {
-        // A process exited (i.e., it wasn't that we were woken up
-        // just because the event went off)
-        (*cb)(dataList[idx]);
-        // Remove this process from the list (NOTE: requires that
-        // ordering does not change, i.e., that all additions are to
-        // the back of the process list)
-        EnterCriticalSection(&crit);
-
-        std::vector<ProcessInfo>::iterator iter = procInfo.begin();
-        iter += idx;
-        procInfo.erase(iter);
-
-        LeaveCriticalSection(&crit);
-      } else {
-        // Notification from other thread
-        ResetEvent(event);
-      }
-    } else {
-      // Unexpected return value. For now, warn.
-      cerr << "Reaper::reaperThread(): unexpected return value "
-           << idx << " from WaitForMultipleObjects" << endl;
-    }
-
-    // Clean up these lists
-    delete[] handleList;
-    delete[] dataList;
-  }
-
-  // Time to shut down
-  active = false;
-}
-
-DWORD WINAPI
-Reaper::reaperThreadEntry(LPVOID data) {
-  Reaper* reaper = (Reaper*) data;
-  reaper->reaperThread();
-  return 0;
-}
--- a/agent/src/os/win32/Reaper.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef _REAPER_
-#define _REAPER_
-
-#include <vector>
-#include <windows.h>
-
-typedef void ReaperCB(void* userData);
-
-/** A Reaper maintains a thread which waits for child processes to
-    terminate; upon termination it calls a user-specified ReaperCB to
-    clean up resources associated with those child processes. */
-
-class Reaper {
-private:
-  Reaper& operator=(const Reaper&);
-  Reaper(const Reaper&);
-
-public:
-  Reaper(ReaperCB*);
-  ~Reaper();
-
-  // Start the reaper thread.
-  bool start();
-
-  // Stop the reaper thread. This is called automatically in the
-  // reaper's destructor. It is not thread safe and should be called
-  // by at most one thread at a time.
-  bool stop();
-
-  // Register a given child process with the reaper. This should be
-  // called by the application's main thread. When that process
-  // terminates, the cleanup callback will be called with the
-  // specified userData in the context of the reaper thread. Callbacks
-  // are guaranteed to be called serially, so they can safely refer to
-  // static data as well as the given user data.
-  void registerProcess(HANDLE processHandle, void* userData);
-
-private:
-  // For thread safety of register()
-  CRITICAL_SECTION crit;
-
-  ReaperCB* cb;
-
-  // State variables
-  volatile bool active;
-  volatile bool shouldShutDown;
-
-  struct ProcessInfo {
-    HANDLE handle;
-    void* userData;
-  };
-
-  // Bookkeeping
-  std::vector<ProcessInfo> procInfo;
-
-  // Synchronization between application thread and reaper thread
-  HANDLE event;
-
-  // Entry point for reaper thread
-  void reaperThread();
-
-  // Static function which is actual thread entry point
-  static DWORD WINAPI reaperThreadEntry(LPVOID data);
-};
-
-#endif  // _REAPER_
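
A minimal usage sketch for the Reaper above (the onChildExit callback, SessionData type, and trackChild helper are hypothetical and not part of the original sources; SwDbgSrv.cpp below registers its ChildInfo records in the same way):

    struct SessionData {
      DWORD pid;
    };

    // Runs on the reaper thread, once per exited child; callbacks are
    // guaranteed to be serialized, per the comment on registerProcess().
    static void onChildExit(void* userData) {
      SessionData* session = (SessionData*) userData;
      // ... release any per-session resources here ...
      delete session;
    }

    static Reaper reaper(&onChildExit);

    void trackChild(HANDLE childProcessHandle, DWORD childPid) {
      SessionData* session = new SessionData;
      session->pid = childPid;
      reaper.registerProcess(childProcessHandle, session);
    }

    // Somewhere during startup:
    //   reaper.start();
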
--- a/agent/src/os/win32/SwDbgSrv.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1266 +0,0 @@
-/*
- * Copyright (c) 2000, 2003, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-// A Simple Windows Debug Server.
-//
-// This software provides a socket-based debug server which uses
-// mostly ASCII protocols to communicate with its clients. Since the
-// Windows security model is largely based around being able to run
-// programs on the machine, this server only accepts connections
-// coming from localhost.
-//
-// When run as a service (under Windows NT), this software provides
-// clients the ability to attach to and detach from processes without
-// killing those processes. Ordinarily this is forbidden by the
-// Windows debugging APIs (although more recent debugging environments
-// from Microsoft seem to have circumvented this restriction, perhaps
-// in a different way). This is achieved by forking a persistent
-// subprocess for each debugging session which remains alive as long
-// as the target process is.
-//
-// At this point the client can read information out of the target
-// process's address space. Future work includes exposing more
-// functionality like writing to the remote address space and
-// suspending and resuming threads.
-
-#include <iostream>
-#include <vector>
-#include <stdlib.h>
-// Must come before everything else
-#include <winsock2.h>
-#include <assert.h>
-#include "Dispatcher.hpp"
-#include "Handler.hpp"
-#include "initWinsock.hpp"
-#include "ioUtils.hpp"
-#include "isNT4.hpp"
-#include "Message.hpp"
-#include "nt4internals.hpp"
-#include "ports.h"
-#include "procList.hpp"
-#include "serverLists.hpp"
-#include "Reaper.hpp"
-
-// Uncomment the #define below to get messages on stderr
-// #define DEBUGGING
-
-using namespace std;
-
-static ChildList childList;
-static ClientList clientList;
-static Reaper* reaper = NULL;
-
-// Needed prototypes
-void shutdownChild(ChildInfo* childInfo);
-void detachClient(ClientInfo* clientInfo);
-void shutdownClient(ClientInfo* clientInfo);
-
-char *
-longToDotFormat(long addr)
-{
-  char *temp_s = new char[20];
-
-  sprintf(temp_s, "%d.%d.%d.%d", ((addr & 0xff000000) >> 24),
-          ((addr & 0x00ff0000) >> 16), ((addr & 0x0000ff00) >> 8),
-          (addr & 0x000000ff));
-
-  return temp_s;
-}
-
-// NOTE that we do this query every time. It is a bad idea to cache IP
-// addresses. For example, we might be hosted on a machine using DHCP
-// and the connection addresses might change over time. (Yes, this
-// actually happened.)
-bool
-isConnectionOkay(ULONG connAddr) {
-  if (connAddr == INADDR_LOOPBACK) {
-    return true;
-  }
-
-  const int MAXNAME = 1024;
-  char myname[MAXNAME];
-  gethostname(myname, MAXNAME);
-  struct hostent* myInfo = gethostbyname(myname);
-  if (myInfo == NULL) {
-#ifdef DEBUGGING
-    cerr << "My host information was null" << endl;
-#endif
-  } else {
-    // Run down the list of IP addresses for myself
-    assert(myInfo->h_length == sizeof(ULONG));
-#ifdef DEBUGGING
-    cerr << "My known IP addresses: " << endl;
-#endif
-    for (char** pp = myInfo->h_addr_list; *pp != NULL; pp++) {
-      char* p = *pp;
-      ULONG altAddr = ntohl(*((ULONG*) p));
-#ifdef DEBUGGING
-      char* name = longToDotFormat(altAddr);
-      cerr << name << endl;
-      delete[] name;
-#endif
-      if (altAddr == connAddr) {
-#ifdef DEBUGGING
-        cerr << "FOUND" << endl;
-#endif
-        return true;
-      }
-    }
-#ifdef DEBUGGING
-    cerr << "Done." << endl;
-#endif
-  }
-
-  return false;
-}
-
-SOCKET
-setupListeningSocket(short port) {
-  SOCKET listening = socket(AF_INET, SOCK_STREAM, 0);
-  if (listening == INVALID_SOCKET) {
-    cerr << "Error creating listening socket" << endl;
-    exit(1);
-  }
-
-  int reuseAddress = 1;
-  if (setsockopt(listening, SOL_SOCKET, SO_REUSEADDR,
-                 (char *)&reuseAddress, sizeof(reuseAddress)) == -1) {
-    cerr << "Error reusing address" << endl;
-    exit(1);
-  }
-
-  struct sockaddr_in serverInfo;
-
-  memset((char *)&serverInfo, 0, sizeof(serverInfo));
-  serverInfo.sin_addr.s_addr = INADDR_ANY;
-  serverInfo.sin_family = AF_INET;
-  serverInfo.sin_port = htons(port);
-
-  if (bind(listening, (struct sockaddr *) &serverInfo, sizeof(serverInfo)) < 0) {
-    cerr << "Error binding socket" << endl;
-    exit(1);
-  }
-
-  if (listen(listening, 5) < 0) {
-    cerr << "Error listening" << endl;
-    exit(1);
-  }
-
-  return listening;
-}
-
-/** Accepts a connection from the given listening socket, but only if
-    the connection came from localhost. Returns INVALID_SOCKET if the
-    connection came from any other IP address or if an error occurred
-    during the call to accept(). */
-SOCKET
-acceptFromLocalhost(SOCKET listening) {
-  struct sockaddr_in peerAddr;
-  int peerAddrLen = sizeof(peerAddr);
-  SOCKET fd = accept(listening, (sockaddr*) &peerAddr, &peerAddrLen);
-  if (fd == INVALID_SOCKET) {
-    return fd;
-  }
-
-  if (!isConnectionOkay(ntohl(peerAddr.sin_addr.s_addr))) {
-    // Reject connections from other machines for security purposes.
-    // The Windows security model seems to assume one user per
-    // machine, and that security is compromised if another user is
-    // able to run executables on the given host. (If these
-    // assumptions are not strict enough, we will have to change
-    // this.)
-    shutdown(fd, SD_BOTH);
-    closesocket(fd);
-    return INVALID_SOCKET;
-  }
-
-  // Disable TCP buffering on all sockets. We send small amounts of
-  // data back and forth and don't want buffering.
-  int buffer_val = 1;
-  if (setsockopt(fd, IPPROTO_IP, TCP_NODELAY,
-                 (char *) &buffer_val, sizeof(buffer_val)) < 0) {
-    shutdown(fd, SD_BOTH);
-    closesocket(fd);
-  }
-
-  return fd;
-}
-
-void
-reapCB(void* arg) {
-  ChildInfo* info = (ChildInfo*) arg;
-  ListsLocker ll;
-  DWORD pid = info->getPid();
-  shutdownChild(info);
-#ifdef DEBUGGING
-  cerr << "Reaped child for process " << pid << endl;
-#endif
-}
-
-/** Starts a child process with stdin and stdout redirected to pipes,
-    handles to which are returned. auxHandle1 and auxHandle2 should be
-    closed as well when the child process exits. Returns false if
-    process creation failed. */
-bool
-startChildProcess(DWORD pidToDebug,
-                  DWORD childStdinBufSize,
-                  DWORD childStdoutBufSize,
-                  LPHANDLE childProcessHandle,
-                  LPHANDLE writeToStdinHandle,
-                  LPHANDLE readFromStdoutHandle,
-                  LPHANDLE auxHandle1,
-                  LPHANDLE auxHandle2) {
-  // Code adapted from Microsoft example
-  // "Creating a Child Process with Redirected Input and Output"
-
-  SECURITY_ATTRIBUTES saAttr;
-  BOOL fSuccess;
-
-  HANDLE hChildStdinRd, hChildStdinWr, hChildStdinWrDup,
-    hChildStdoutRd, hChildStdoutWr, hChildStdoutRdDup,
-    hSaveStdin, hSaveStdout;
-
-  // Set the bInheritHandle flag so pipe handles are inherited.
-  saAttr.nLength = sizeof(SECURITY_ATTRIBUTES);
-  saAttr.bInheritHandle = TRUE;
-  saAttr.lpSecurityDescriptor = NULL;
-
-  // The steps for redirecting child process's STDOUT:
-  //   1. Save current STDOUT, to be restored later.
-  //   2. Create anonymous pipe to be STDOUT for child process.
-  //   3. Set STDOUT of the parent process to be write handle to
-  //      the pipe, so it is inherited by the child process.
-  //   4. Create a noninheritable duplicate of the read handle and
-  //      close the inheritable read handle.
-
-  // Save the handle to the current STDOUT.
-  hSaveStdout = GetStdHandle(STD_OUTPUT_HANDLE);
-  // Create a pipe for the child process's STDOUT.
-  if (! CreatePipe(&hChildStdoutRd, &hChildStdoutWr, &saAttr, childStdoutBufSize)) {
-    return false;
-  }
-  // Set a write handle to the pipe to be STDOUT.
-  if (! SetStdHandle(STD_OUTPUT_HANDLE, hChildStdoutWr)) {
-    return false;
-  }
-  // Create noninheritable read handle and close the inheritable read
-  // handle.
-  fSuccess = DuplicateHandle(GetCurrentProcess(), hChildStdoutRd,
-                             GetCurrentProcess(), &hChildStdoutRdDup,
-                             0, FALSE,
-                             DUPLICATE_SAME_ACCESS);
-  if( !fSuccess ) {
-    return false;
-  }
-  CloseHandle(hChildStdoutRd);
-
-  // The steps for redirecting child process's STDIN:
-  //   1.  Save current STDIN, to be restored later.
-  //   2.  Create anonymous pipe to be STDIN for child process.
-  //   3.  Set STDIN of the parent to be the read handle to the
-  //       pipe, so it is inherited by the child process.
-  //   4.  Create a noninheritable duplicate of the write handle,
-  //       and close the inheritable write handle.
-  // Save the handle to the current STDIN.
-  hSaveStdin = GetStdHandle(STD_INPUT_HANDLE);
-  // Create a pipe for the child process's STDIN.
-  if (! CreatePipe(&hChildStdinRd, &hChildStdinWr, &saAttr, childStdinBufSize)) {
-    return false;
-  }
-  // Set a read handle to the pipe to be STDIN.
-  if (! SetStdHandle(STD_INPUT_HANDLE, hChildStdinRd)) {
-    return false;
-  }
-  // Duplicate the write handle to the pipe so it is not inherited.
-  fSuccess = DuplicateHandle(GetCurrentProcess(), hChildStdinWr,
-                             GetCurrentProcess(), &hChildStdinWrDup, 0,
-                             FALSE,                  // not inherited
-                             DUPLICATE_SAME_ACCESS);
-  if (! fSuccess) {
-    return false;
-  }
-  CloseHandle(hChildStdinWr);
-
-  // Create the child process
-  char cmdLine[256];
-  sprintf(cmdLine, "SwDbgSub.exe %u", pidToDebug);
-  PROCESS_INFORMATION procInfo;
-  STARTUPINFO startInfo;
-  memset((char*) &startInfo, 0, sizeof(startInfo));
-  startInfo.cb = sizeof(startInfo);
-  BOOL res = CreateProcess(NULL,
-                           cmdLine,
-                           NULL,
-                           NULL,
-                           TRUE, // inherit handles: important
-                           0,
-                           NULL,
-                           NULL,
-                           &startInfo,
-                           &procInfo);
-  if (!res) {
-    return false;
-  }
-  // After process creation, restore the saved STDIN and STDOUT.
-  if (! SetStdHandle(STD_INPUT_HANDLE, hSaveStdin)) {
-    return false;
-  }
-  if (! SetStdHandle(STD_OUTPUT_HANDLE, hSaveStdout)) {
-    return false;
-  }
-
-  // hChildStdinWrDup can be used to write to the child's stdin
-  // hChildStdoutRdDup can be used to read from the child's stdout
-
-  // NOTE: example code closes hChildStdoutWr before reading from
-  // hChildStdoutRdDup. "Close the write end of the pipe before
-  // reading from the read end of the pipe"??? Looks like this is
-  // example-specific.
-
-  // Set up return arguments
-  // hChildStdoutRd and hChildStdinWr are already closed at this point
-  *childProcessHandle = procInfo.hProcess;
-  *writeToStdinHandle = hChildStdinWrDup;
-  *readFromStdoutHandle = hChildStdoutRdDup;
-  *auxHandle1 = hChildStdinRd;
-  *auxHandle2 = hChildStdoutWr;
-  return true;
-}
-
-/** Clears the event and writes the message to the child process */
-bool
-sendMessage(ChildInfo* child, Message* message) {
-  DWORD numBytesWritten;
-  if (!WriteFile(child->getWriteToStdinHandle(),
-                 message, sizeof(Message), &numBytesWritten, NULL)) {
-    return false;
-  }
-  if (numBytesWritten != sizeof(Message)) {
-    return false;
-  }
-  // Follow up "poke" messages with the raw data
-  if (message->type == Message::POKE) {
-    if (!WriteFile(child->getWriteToStdinHandle(),
-                   message->pokeArg.data, message->pokeArg.numBytes, &numBytesWritten, NULL)) {
-      return false;
-    }
-    if (numBytesWritten != message->pokeArg.numBytes) {
-      return false;
-    }
-  }
-  return true;
-}
-
-/** Copies data from child's stdout to the client's IOBuf and sends it
-    along */
-bool
-forwardReplyToClient(ChildInfo* child, ClientInfo* client) {
-  DWORD total = 0;
-  IOBuf::FillState ret;
-
-  do {
-    DWORD temp;
-    ret = client->getIOBuf()->fillFromFileHandle(child->getReadFromStdoutHandle(),
-                                                 &temp);
-    if (ret == IOBuf::DONE || ret == IOBuf::MORE_DATA_PENDING) {
-      if (!client->getIOBuf()->flush()) {
-#ifdef DEBUGGING
-        cerr << "Forward failed because flush failed" << endl;
-#endif
-        return false;
-      }
-      total += temp;
-    }
-  } while (ret == IOBuf::MORE_DATA_PENDING);
-
-  return (ret == IOBuf::FAILED) ? false : true;
-}
-
-//----------------------------------------------------------------------
-// Server Handler
-//
-
-class ServerHandler : public Handler {
-public:
-  ServerHandler();
-
-  // Starts up in Unicode mode by default
-  bool getASCII();
-
-  void setIOBuf(IOBuf* ioBuf);
-
-  void procList(char* arg);
-
-  // Must be called before calling one of the routines below
-  void setClientInfo(ClientInfo* info);
-
-  // Indicates to outer loop that exit was called or that an error
-  // occurred and that the client exited.
-  bool exited();
-  // Clears this state
-  void clearExited();
-
-  void ascii(char* arg);
-  void unicode(char* arg);
-  void attach(char* arg);
-  void detach(char* arg);
-  void libInfo(char* arg);
-  void peek(char* arg);
-  void poke(char* arg);
-  void threadList(char* arg);
-  void dupHandle(char* arg);
-  void closeHandle(char* arg);
-  void getContext(char* arg);
-  void setContext(char* arg);
-  void selectorEntry(char* arg);
-  void suspend(char* arg);
-  void resume(char* arg);
-  void pollEvent(char* arg);
-  void continueEvent(char* arg);
-  void exit(char* arg);
-
-  // This is pretty gross. Needed to make the target process know
-  // about clients that have disconnected unexpectedly while attached.
-  friend void shutdownClient(ClientInfo*);
-private:
-  // Writes: charSize <space> numChars <space> <binary string>
-  // Handles both ASCII and UNICODE modes
-  void writeString(USHORT len, WCHAR* str);
-
-  // Handles only ASCII mode
-  void writeString(USHORT len, char* str);
-
-  ClientInfo* clientInfo;
-  IOBuf* ioBuf;
-  bool _exited;
-  bool _ascii;
-};
-
-static ServerHandler* handler;
-
-ServerHandler::ServerHandler() {
-  _exited = false;
-  _ascii = false;
-  ioBuf = NULL;
-}
-
-bool
-ServerHandler::getASCII() {
-  return _ascii;
-}
-
-void
-ServerHandler::setIOBuf(IOBuf* buf) {
-  ioBuf = buf;
-}
-
-void
-ServerHandler::setClientInfo(ClientInfo* info) {
-  clientInfo = info;
-}
-
-bool
-ServerHandler::exited() {
-  return _exited;
-}
-
-void
-ServerHandler::clearExited() {
-  _exited = false;
-}
-
-void
-ServerHandler::ascii(char* arg) {
-  _ascii = true;
-}
-
-void
-ServerHandler::unicode(char* arg) {
-  _ascii = false;
-}
-
-void
-ServerHandler::procList(char* arg) {
-#ifdef DEBUGGING
-  cerr << "proclist" << endl;
-#endif
-
-  ProcEntryList processes;
-  ::procList(processes);
-
-  ioBuf->writeInt(processes.size());
-
-  for (ProcEntryList::iterator iter = processes.begin();
-       iter != processes.end(); iter++) {
-    ProcEntry& entry = *iter;
-    ioBuf->writeSpace();
-    ioBuf->writeUnsignedInt(entry.getPid());
-    ioBuf->writeSpace();
-    writeString(entry.getNameLength(), entry.getName());
-  }
-
-  ioBuf->writeEOL();
-  ioBuf->flush();
-}
-
-void
-ServerHandler::attach(char* arg) {
-  // If the client is already attached to a process, fail.
-  if (clientInfo->getTarget() != NULL) {
-    ioBuf->writeBoolAsInt(false);
-    ioBuf->writeEOL();
-    ioBuf->flush();
-    return;
-  }
-
-  // Try to get pid
-  DWORD pid;
-  if (!scanUnsignedLong(&arg, &pid)) {
-    ioBuf->writeBoolAsInt(false);
-    ioBuf->writeEOL();
-    ioBuf->flush();
-    return;
-  }
-
-  // See whether this pid is already forked
-  ListsLocker ll;
-  ChildInfo* childInfo = childList.getChildByPid(pid);
-  if (childInfo != NULL) {
-    // If this child already has a client, return false
-    if (childInfo->getClient() != NULL) {
-      ioBuf->writeBoolAsInt(false);
-      ioBuf->writeEOL();
-      ioBuf->flush();
-      return;
-    }
-
-    // Otherwise, can associate this client with this child process
-    childInfo->setClient(clientInfo);
-    clientInfo->setTarget(childInfo);
-
-    // Tell the child we are attaching so it can suspend the target
-    // process
-    Message msg;
-    msg.type = Message::ATTACH;
-    sendMessage(childInfo, &msg);
-
-    ioBuf->writeBoolAsInt(true);
-    ioBuf->writeEOL();
-    ioBuf->flush();
-    return;
-  } else {
-    // Have to fork a new child subprocess
-    HANDLE childProcessHandle;
-    HANDLE writeToStdinHandle;
-    HANDLE readFromStdoutHandle;
-    HANDLE auxHandle1;
-    HANDLE auxHandle2;
-    if (!startChildProcess(pid,
-                           32768,
-                           131072,
-                           &childProcessHandle,
-                           &writeToStdinHandle,
-                           &readFromStdoutHandle,
-                           &auxHandle1,
-                           &auxHandle2)) {
-      ioBuf->writeBoolAsInt(false);
-      ioBuf->writeEOL();
-      ioBuf->flush();
-      return;
-    }
-
-    // See whether the child succeeded in attaching to the process
-    char res;
-    DWORD numRead;
-    if (!ReadFile(readFromStdoutHandle,
-                  &res,
-                  sizeof(char),
-                  &numRead,
-                  NULL)) {
-      ioBuf->writeBoolAsInt(false);
-      ioBuf->writeEOL();
-      ioBuf->flush();
-      return;
-    }
-
-    if (!res) {
-      ioBuf->writeBoolAsInt(false);
-      ioBuf->writeEOL();
-      ioBuf->flush();
-      return;
-    }
-
-    // OK, success.
-    childInfo = new ChildInfo(pid, childProcessHandle,
-                              writeToStdinHandle, readFromStdoutHandle,
-                              auxHandle1, auxHandle2);
-    childList.addChild(childInfo);
-    reaper->registerProcess(childProcessHandle, childInfo);
-    // Associate this client with this child process
-    childInfo->setClient(clientInfo);
-    clientInfo->setTarget(childInfo);
-
-    // Tell the child process to actually suspend the target process
-    Message msg;
-    msg.type = Message::ATTACH;
-    sendMessage(childInfo, &msg);
-
-    // Write result to client
-    ioBuf->writeBoolAsInt(true);
-    ioBuf->writeEOL();
-    ioBuf->flush();
-    return;
-  }
-}
-
-void
-ServerHandler::detach(char* arg) {
-  // If the client is not attached, fail.
-  if (clientInfo->getTarget() == NULL) {
-    ioBuf->writeBoolAsInt(false);
-    ioBuf->writeEOL();
-    ioBuf->flush();
-    return;
-  }
-
-  detachClient(clientInfo);
-
-  ioBuf->writeBoolAsInt(true);
-  ioBuf->writeEOL();
-  ioBuf->flush();
-}
-
-void
-ServerHandler::libInfo(char* arg) {
-  ListsLocker ll;
-  ChildInfo* child = clientInfo->getTarget();
-  if (child == NULL) {
-    ioBuf->writeInt(0);
-    ioBuf->writeEOL();
-    ioBuf->flush();
-    return;
-  }
-
-  // Send message to child
-  Message msg;
-  msg.type = Message::LIBINFO;
-  sendMessage(child, &msg);
-
-  // Forward reply to client
-  forwardReplyToClient(child, clientInfo);
-}
-
-void
-ServerHandler::peek(char* arg) {
-  ListsLocker ll;
-  ChildInfo* child = clientInfo->getTarget();
-  if (child == NULL) {
-    ioBuf->writeString("B");
-    ioBuf->writeBinChar(0);
-    ioBuf->flush();
-    return;
-  }
-
-  // Try to get address
-  DWORD address;
-  if (!scanAddress(&arg, &address)) {
-    ioBuf->writeString("B");
-    ioBuf->writeBinChar(0);
-    ioBuf->flush();
-    return;
-  }
-
-  // Try to get number of bytes
-  DWORD numBytes;
-  if (!scanUnsignedLong(&arg, &numBytes)) {
-    ioBuf->writeString("B");
-    ioBuf->writeBinChar(0);
-    ioBuf->flush();
-    return;
-  }
-
-  // Send message to child
-  Message msg;
-  msg.type = Message::PEEK;
-  msg.peekArg.address = address;
-  msg.peekArg.numBytes = numBytes;
-  sendMessage(child, &msg);
-
-  // Forward reply to client
-  forwardReplyToClient(child, clientInfo);
-}
-
-void
-ServerHandler::poke(char* arg) {
-#ifdef DEBUGGING
-  cerr << "ServerHandler::poke" << endl;
-#endif
-  ListsLocker ll;
-  ChildInfo* child = clientInfo->getTarget();
-  if (child == NULL) {
-    ioBuf->writeBoolAsInt(false);
-    ioBuf->flush();
-    return;
-  }
-
-  // Try to get address
-  DWORD address;
-  if (!scanAddress(&arg, &address)) {
-    ioBuf->writeBoolAsInt(false);
-    ioBuf->flush();
-    return;
-  }
-
-  // Try to get number of bytes
-  if (!scanAndSkipBinEscapeChar(&arg)) {
-    ioBuf->writeBoolAsInt(false);
-    ioBuf->flush();
-    return;
-  }
-  DWORD numBytes;
-  if (!scanBinUnsignedLong(&arg, &numBytes)) {
-    ioBuf->writeBoolAsInt(false);
-    ioBuf->flush();
-    return;
-  }
-
-  // Raw data is now in "arg"
-  // Send message to child
-  Message msg;
-  msg.type = Message::POKE;
-  msg.pokeArg.address = address;
-  msg.pokeArg.numBytes = numBytes;
-  msg.pokeArg.data = arg;
-  sendMessage(child, &msg);
-
-  // Forward reply to client
-  forwardReplyToClient(child, clientInfo);
-}
-
-void
-ServerHandler::threadList(char* arg) {
-  ListsLocker ll;
-  ChildInfo* child = clientInfo->getTarget();
-  if (child == NULL) {
-    ioBuf->writeBoolAsInt(false);
-    ioBuf->flush();
-    return;
-  }
-
-  // Send message to child
-  Message msg;
-  msg.type = Message::THREADLIST;
-  sendMessage(child, &msg);
-
-  // Forward reply to client
-  forwardReplyToClient(child, clientInfo);
-}
-
-void
-ServerHandler::dupHandle(char* arg) {
-  ListsLocker ll;
-  ChildInfo* child = clientInfo->getTarget();
-  if (child == NULL) {
-    ioBuf->writeBoolAsInt(false);
-    ioBuf->flush();
-    return;
-  }
-
-  // Try to get handle
-  DWORD address;
-  if (!scanAddress(&arg, &address)) {
-    ioBuf->writeBoolAsInt(false);
-    ioBuf->flush();
-    return;
-  }
-
-  // Send message to child
-  Message msg;
-  msg.type = Message::DUPHANDLE;
-  msg.handleArg.handle = (HANDLE) address;
-  sendMessage(child, &msg);
-
-  // Forward reply to client
-  forwardReplyToClient(child, clientInfo);
-}
-
-void
-ServerHandler::closeHandle(char* arg) {
-  ListsLocker ll;
-  ChildInfo* child = clientInfo->getTarget();
-  if (child == NULL) {
-    return;
-  }
-
-  // Try to get handle
-  DWORD address;
-  if (!scanAddress(&arg, &address)) {
-    return;
-  }
-
-  // Send message to child
-  Message msg;
-  msg.type = Message::CLOSEHANDLE;
-  msg.handleArg.handle = (HANDLE) address;
-  sendMessage(child, &msg);
-
-  // No reply
-}
-
-void
-ServerHandler::getContext(char* arg) {
-  ListsLocker ll;
-  ChildInfo* child = clientInfo->getTarget();
-  if (child == NULL) {
-    ioBuf->writeBoolAsInt(false);
-    ioBuf->flush();
-    return;
-  }
-
-  // Try to get handle
-  DWORD address;
-  if (!scanAddress(&arg, &address)) {
-    ioBuf->writeBoolAsInt(false);
-    ioBuf->flush();
-    return;
-  }
-
-  // Send message to child
-  Message msg;
-  msg.type = Message::GETCONTEXT;
-  msg.handleArg.handle = (HANDLE) address;
-  sendMessage(child, &msg);
-
-  // Forward reply to client
-  forwardReplyToClient(child, clientInfo);
-}
-
-void
-ServerHandler::setContext(char* arg) {
-  ListsLocker ll;
-  ChildInfo* child = clientInfo->getTarget();
-  if (child == NULL) {
-    ioBuf->writeBoolAsInt(false);
-    ioBuf->flush();
-    return;
-  }
-
-  // Try to get handle
-  DWORD address;
-  if (!scanAddress(&arg, &address)) {
-    ioBuf->writeBoolAsInt(false);
-    ioBuf->flush();
-    return;
-  }
-
-  // Try to get context
-  DWORD regs[NUM_REGS_IN_CONTEXT];
-  for (int i = 0; i < NUM_REGS_IN_CONTEXT; i++) {
-    if (!scanAddress(&arg, &regs[i])) {
-      ioBuf->writeBoolAsInt(false);
-      ioBuf->flush();
-      return;
-    }
-  }
-
-  // Send message to child
-  Message msg;
-  msg.type = Message::SETCONTEXT;
-  msg.setContextArg.handle = (HANDLE) address;
-  msg.setContextArg.Eax    = regs[0];
-  msg.setContextArg.Ebx    = regs[1];
-  msg.setContextArg.Ecx    = regs[2];
-  msg.setContextArg.Edx    = regs[3];
-  msg.setContextArg.Esi    = regs[4];
-  msg.setContextArg.Edi    = regs[5];
-  msg.setContextArg.Ebp    = regs[6];
-  msg.setContextArg.Esp    = regs[7];
-  msg.setContextArg.Eip    = regs[8];
-  msg.setContextArg.Ds     = regs[9];
-  msg.setContextArg.Es     = regs[10];
-  msg.setContextArg.Fs     = regs[11];
-  msg.setContextArg.Gs     = regs[12];
-  msg.setContextArg.Cs     = regs[13];
-  msg.setContextArg.Ss     = regs[14];
-  msg.setContextArg.EFlags = regs[15];
-  msg.setContextArg.Dr0    = regs[16];
-  msg.setContextArg.Dr1    = regs[17];
-  msg.setContextArg.Dr2    = regs[18];
-  msg.setContextArg.Dr3    = regs[19];
-  msg.setContextArg.Dr6    = regs[20];
-  msg.setContextArg.Dr7    = regs[21];
-  sendMessage(child, &msg);
-
-  // Forward reply to client
-  forwardReplyToClient(child, clientInfo);
-}
-
-void
-ServerHandler::selectorEntry(char* arg) {
-  ListsLocker ll;
-  ChildInfo* child = clientInfo->getTarget();
-  if (child == NULL) {
-    ioBuf->writeBoolAsInt(false);
-    ioBuf->flush();
-    return;
-  }
-
-  // Try to get thread handle
-  DWORD address;
-  if (!scanAddress(&arg, &address)) {
-    ioBuf->writeBoolAsInt(false);
-    ioBuf->flush();
-    return;
-  }
-
-  // Try to get selector
-  DWORD selector;
-  if (!scanUnsignedLong(&arg, &selector)) {
-    ioBuf->writeBoolAsInt(false);
-    ioBuf->flush();
-    return;
-  }
-
-  // Send message to child
-  Message msg;
-  msg.type = Message::SELECTORENTRY;
-  msg.selectorArg.handle   = (HANDLE) address;
-  msg.selectorArg.selector = selector;
-  sendMessage(child, &msg);
-
-  // Forward reply to client
-  forwardReplyToClient(child, clientInfo);
-}
-
-void
-ServerHandler::suspend(char* arg) {
-  ListsLocker ll;
-  ChildInfo* child = clientInfo->getTarget();
-  if (child == NULL) {
-    return;
-  }
-
-  // Send message to child
-  Message msg;
-  msg.type = Message::SUSPEND;
-  sendMessage(child, &msg);
-
-  // No reply
-}
-
-void
-ServerHandler::resume(char* arg) {
-  ListsLocker ll;
-  ChildInfo* child = clientInfo->getTarget();
-  if (child == NULL) {
-    return;
-  }
-
-  // Send message to child
-  Message msg;
-  msg.type = Message::RESUME;
-  sendMessage(child, &msg);
-
-  // No reply
-}
-
-void
-ServerHandler::pollEvent(char* arg) {
-  ListsLocker ll;
-  ChildInfo* child = clientInfo->getTarget();
-  if (child == NULL) {
-    ioBuf->writeBoolAsInt(false);
-    ioBuf->flush();
-    return;
-  }
-
-  // Send message to child
-  Message msg;
-  msg.type = Message::POLLEVENT;
-  sendMessage(child, &msg);
-
-  // Forward reply to client
-  forwardReplyToClient(child, clientInfo);
-}
-
-void
-ServerHandler::continueEvent(char* arg) {
-  ListsLocker ll;
-  ChildInfo* child = clientInfo->getTarget();
-  if (child == NULL) {
-    ioBuf->writeBoolAsInt(false);
-    ioBuf->flush();
-    return;
-  }
-
-  // Try to get bool arg
-  int passEventToClient;
-  if (!scanInt(&arg, &passEventToClient)) {
-    ioBuf->writeBoolAsInt(false);
-    ioBuf->flush();
-    return;
-  }
-
-  // Send message to child
-  Message msg;
-  msg.type = Message::CONTINUEEVENT;
-  msg.boolArg.val = ((passEventToClient != 0) ? true : false);
-  sendMessage(child, &msg);
-
-  // Forward reply to client
-  forwardReplyToClient(child, clientInfo);
-}
-
-void
-ServerHandler::exit(char* arg) {
-  shutdownClient(clientInfo);
-  _exited = true;
-}
-
-void
-ServerHandler::writeString(USHORT len, WCHAR* str) {
-  if (_ascii) {
-    char* cStr = new char[len + 1];
-    sprintf(cStr, "%.*ls", len, str);
-    writeString(len, cStr);
-    delete[] cStr;
-  } else {
-    ioBuf->writeInt(sizeof(unsigned short));
-    ioBuf->writeSpace();
-    ioBuf->writeInt(len);
-    ioBuf->writeSpace();
-    for (int i = 0; i < len; i++) {
-      ioBuf->writeBinUnsignedShort(str[i]);
-    }
-  }
-}
-
-void
-ServerHandler::writeString(USHORT len, char* str) {
-  ioBuf->writeInt(1);
-  ioBuf->writeSpace();
-  ioBuf->writeInt(len);
-  ioBuf->writeSpace();
-  ioBuf->writeString(str);
-}
-
-//
-//----------------------------------------------------------------------
-
-//----------------------------------------------------------------------
-// Shutdown routines
-//
-
-void
-shutdownChild(ChildInfo* childInfo) {
-  childList.removeChild(childInfo);
-  childInfo->closeAll();
-  if (childInfo->getClient() != NULL) {
-    shutdownClient(childInfo->getClient());
-  }
-  delete childInfo;
-}
-
-void
-detachClient(ClientInfo* info) {
-  ListsLocker ll;
-  // May have been dissociated while not under cover of lock
-  if (info->getTarget() == NULL) {
-    return;
-  }
-
-  // Tell the child that we have detached to let the target process
-  // continue running
-  Message msg;
-  msg.type = Message::DETACH;
-  sendMessage(info->getTarget(), &msg);
-
-  // Dissociate the client and the target
-  info->getTarget()->setClient(NULL);
-  info->setTarget(NULL);
-}
-
-void
-shutdownClient(ClientInfo* clientInfo) {
-#ifdef DEBUGGING
-  cerr << "Shutting down client" << endl;
-#endif
-
-  // If we're connected, inform the target process that we're
-  // disconnecting
-  detachClient(clientInfo);
-
-  // Remove this client from the list and delete it
-  clientList.removeClient(clientInfo);
-  if (clientInfo->getTarget() != NULL) {
-    clientInfo->getTarget()->setClient(NULL);
-  }
-  clientInfo->closeAll();
-  delete clientInfo;
-}
-
-//
-//----------------------------------------------------------------------
-
-
-/** Main dispatcher for client commands. NOTE: do not refer to this
-    clientInfo data structure after calling this routine, as it may be
-    deleted internally. */
-void
-readAndDispatch(ClientInfo* clientInfo) {
-  IOBuf::ReadLineResult res;
-  IOBuf* ioBuf = clientInfo->getIOBuf();
-  unsigned long howMany;
-  ioctlsocket(clientInfo->getDataSocket(), FIONREAD, &howMany);
-  if (howMany == 0) {
-    // Client closed down.
-    shutdownClient(clientInfo);
-    return;
-  }
-  // Read and process as much data as possible
-  do {
-    res = ioBuf->tryReadLine();
-    if (res == IOBuf::RL_ERROR) {
-#ifdef DEBUGGING
-      cerr << "Error while reading line" << endl;
-#endif
-      shutdownClient(clientInfo);
-      return;
-    } else if (res == IOBuf::RL_GOT_DATA) {
-#ifdef DEBUGGING
-      cerr << "Got data: \"" << ioBuf->getLine() << "\"" << endl;
-#endif
-      handler->setIOBuf(ioBuf);
-      handler->setClientInfo(clientInfo);
-      handler->clearExited();
-      Dispatcher::dispatch(ioBuf->getLine(), handler);
-    }
-  } while (res == IOBuf::RL_GOT_DATA && (!handler->exited()));
-#ifdef DEBUGGING
-  cerr << "Exiting readAndDispatch" << endl;
-#endif
-}
-
-int
-main(int argc, char **argv)
-{
-  initWinsock();
-
-  if (isNT4()) {
-    loadPSAPIDLL(); // Will exit if not present
-  }
-
-  SOCKET clientListeningSock = setupListeningSocket(CLIENT_PORT);
-
-  handler = new ServerHandler();
-  Lists::init();
-
-  reaper = new Reaper(&reapCB);
-  if (!reaper->start()) {
-    exit(1);
-  }
-
-  while (true) {
-    // Select on all sockets:
-    //  - client listening socket
-    //  - sockets for all client connections
-
-    // When one of the client connections closes, close its socket
-    // handles.
-
-    fd_set set;
-    SOCKET maxSock = 0;
-
-    // Set up fd_set
-    {
-      int i;
-      FD_ZERO(&set);
-      FD_SET(clientListeningSock, &set);
-      if (clientListeningSock > maxSock) {
-        maxSock = clientListeningSock;
-      }
-      for (i = 0; i < clientList.size(); i++) {
-        ClientInfo* info = clientList.get(i);
-        if (info->getDataSocket() > maxSock) {
-          maxSock = info->getDataSocket();
-        }
-        FD_SET(info->getDataSocket(), &set);
-      }
-    }
-    struct timeval timeout;
-    timeout.tv_sec = 300; // 5 minutes
-    timeout.tv_usec = 0;
-    int res = select(maxSock, &set, NULL, NULL, &timeout);
-    if (res > 0) {
-
-      ////////////////
-      // New client //
-      ////////////////
-      if (FD_ISSET(clientListeningSock, &set)) {
-        SOCKET fd = acceptFromLocalhost(clientListeningSock);
-        if (fd != INVALID_SOCKET) {
-          // Create new client information object
-          ClientInfo* info = new ClientInfo(fd);
-          // Add to list of clients
-          clientList.addClient(info);
-#ifdef DEBUGGING
-          cerr << "New client" << endl;
-#endif
-        }
-      }
-
-      ///////////////////////////
-      // Commands from clients //
-      ///////////////////////////
-      ClientInfo* clientInfo;
-      if (clientList.isAnyDataSocketSet(&set, &clientInfo)) {
-        readAndDispatch(clientInfo);
-      }
-    } else if (res < 0) {
-      // Looks like one of the clients was killed. Try to figure out which one.
-      bool found = false;
-      fd_set set;
-      struct timeval timeout;
-      timeout.tv_sec = 0;
-      timeout.tv_usec = 0;
-      for (int i = 0; i < clientList.size(); i++) {
-        ClientInfo* info = clientList.get(i);
-        FD_ZERO(&set);
-        FD_SET(info->getDataSocket(), &set);
-        if (select(1 + info->getDataSocket(), &set, NULL, NULL, &timeout) < 0) {
-          found = true;
-          clientList.removeClient(info);
-          info->closeAll();
-          delete info;
-          break;
-        }
-      }
-      if (!found) {
-        // This indicates trouble -- one of our listening sockets died.
-        exit(1);
-      }
-    }
-  }
-
-  return 0;
-}
--- a/agent/src/os/win32/SwDbgSrv.dsp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,146 +0,0 @@
-# Microsoft Developer Studio Project File - Name="SwDbgSrv" - Package Owner=<4>
-# Microsoft Developer Studio Generated Build File, Format Version 6.00
-# ** DO NOT EDIT **
-
-# TARGTYPE "Win32 (x86) Console Application" 0x0103
-
-CFG=SwDbgSrv - Win32 Debug
-!MESSAGE This is not a valid makefile. To build this project using NMAKE,
-!MESSAGE use the Export Makefile command and run
-!MESSAGE 
-!MESSAGE NMAKE /f "SwDbgSrv.mak".
-!MESSAGE 
-!MESSAGE You can specify a configuration when running NMAKE
-!MESSAGE by defining the macro CFG on the command line. For example:
-!MESSAGE 
-!MESSAGE NMAKE /f "SwDbgSrv.mak" CFG="SwDbgSrv - Win32 Debug"
-!MESSAGE 
-!MESSAGE Possible choices for configuration are:
-!MESSAGE 
-!MESSAGE "SwDbgSrv - Win32 Release" (based on "Win32 (x86) Console Application")
-!MESSAGE "SwDbgSrv - Win32 Debug" (based on "Win32 (x86) Console Application")
-!MESSAGE 
-
-# Begin Project
-# PROP AllowPerConfigDependencies 0
-# PROP Scc_ProjName ""
-# PROP Scc_LocalPath ""
-CPP=cl.exe
-RSC=rc.exe
-
-!IF  "$(CFG)" == "SwDbgSrv - Win32 Release"
-
-# PROP BASE Use_MFC 0
-# PROP BASE Use_Debug_Libraries 0
-# PROP BASE Output_Dir "Release"
-# PROP BASE Intermediate_Dir "Release"
-# PROP BASE Target_Dir ""
-# PROP Use_MFC 0
-# PROP Use_Debug_Libraries 0
-# PROP Output_Dir "Release"
-# PROP Intermediate_Dir "Release"
-# PROP Ignore_Export_Lib 0
-# PROP Target_Dir ""
-# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
-# ADD CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
-# ADD BASE RSC /l 0x409 /d "NDEBUG"
-# ADD RSC /l 0x409 /d "NDEBUG"
-BSC32=bscmake.exe
-# ADD BASE BSC32 /nologo
-# ADD BSC32 /nologo
-LINK32=link.exe
-# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
-# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib ws2_32.lib /nologo /subsystem:console /machine:I386
-
-!ELSEIF  "$(CFG)" == "SwDbgSrv - Win32 Debug"
-
-# PROP BASE Use_MFC 0
-# PROP BASE Use_Debug_Libraries 1
-# PROP BASE Output_Dir "SwDbgSrv___Win32_Debug"
-# PROP BASE Intermediate_Dir "SwDbgSrv___Win32_Debug"
-# PROP BASE Target_Dir ""
-# PROP Use_MFC 0
-# PROP Use_Debug_Libraries 1
-# PROP Output_Dir "Debug"
-# PROP Intermediate_Dir "Debug"
-# PROP Ignore_Export_Lib 0
-# PROP Target_Dir ""
-# ADD BASE CPP /nologo /W3 /Gm /GX /ZI /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /GZ /c
-# ADD CPP /nologo /W3 /Gm /GX /ZI /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /GZ /c
-# ADD BASE RSC /l 0x409 /d "_DEBUG"
-# ADD RSC /l 0x409 /d "_DEBUG"
-BSC32=bscmake.exe
-# ADD BASE BSC32 /nologo
-# ADD BSC32 /nologo
-LINK32=link.exe
-# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
-# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib ws2_32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
-
-!ENDIF 
-
-# Begin Target
-
-# Name "SwDbgSrv - Win32 Release"
-# Name "SwDbgSrv - Win32 Debug"
-# Begin Group "Source Files"
-
-# PROP Default_Filter "cpp;c;cxx;rc;def;r;odl;idl;hpj;bat"
-# Begin Source File
-
-SOURCE=.\Buffer.cpp
-# End Source File
-# Begin Source File
-
-SOURCE=.\Dispatcher.cpp
-# End Source File
-# Begin Source File
-
-SOURCE=.\initWinsock.cpp
-# End Source File
-# Begin Source File
-
-SOURCE=.\IOBuf.cpp
-# End Source File
-# Begin Source File
-
-SOURCE=.\ioUtils.cpp
-# End Source File
-# Begin Source File
-
-SOURCE=.\isNT4.cpp
-# End Source File
-# Begin Source File
-
-SOURCE=.\nt4internals.cpp
-# End Source File
-# Begin Source File
-
-SOURCE=.\procList.cpp
-# End Source File
-# Begin Source File
-
-SOURCE=.\Reaper.cpp
-# End Source File
-# Begin Source File
-
-SOURCE=.\serverLists.cpp
-# End Source File
-# Begin Source File
-
-SOURCE=.\SwDbgSrv.cpp
-# End Source File
-# Begin Source File
-
-SOURCE=.\toolHelp.cpp
-# End Source File
-# End Group
-# Begin Group "Header Files"
-
-# PROP Default_Filter "h;hpp;hxx;hm;inl"
-# End Group
-# Begin Group "Resource Files"
-
-# PROP Default_Filter "ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe"
-# End Group
-# End Target
-# End Project
--- a/agent/src/os/win32/SwDbgSrv.dsw	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-Microsoft Developer Studio Workspace File, Format Version 6.00
-# WARNING: DO NOT EDIT OR DELETE THIS WORKSPACE FILE!
-
-###############################################################################
-
-Project: "SwDbgSrv"=.\SwDbgSrv.dsp - Package Owner=<4>
-
-Package=<5>
-{{{
-}}}
-
-Package=<4>
-{{{
-}}}
-
-###############################################################################
-
-Project: "SwDbgSub"=.\SwDbgSub.dsp - Package Owner=<4>
-
-Package=<5>
-{{{
-}}}
-
-Package=<4>
-{{{
-}}}
-
-###############################################################################
-
-Global:
-
-Package=<5>
-{{{
-}}}
-
-Package=<3>
-{{{
-}}}
-
-###############################################################################
-
--- a/agent/src/os/win32/SwDbgSub.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,883 +0,0 @@
-/*
- * Copyright (c) 2000, 2003, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-// This is the source code for the subprocess forked by the Simple
-// Windows Debug Server. It assumes most of the responsibility for the
-// debug session, and processes all of the commands sent by clients.
-
-// Disable too-long symbol warnings
-#pragma warning ( disable : 4786 )
-
-#include <iostream>
-#include <vector>
-#include <stdlib.h>
-#include <assert.h>
-// Must come before windows.h
-#include <winsock2.h>
-#include <windows.h>
-#include "IOBuf.hpp"
-#include "libInfo.hpp"
-#include "LockableList.hpp"
-#include "Message.hpp"
-#include "Monitor.hpp"
-#include "nt4internals.hpp"
-
-// Uncomment the #define below to get messages on stderr
-// #define DEBUGGING
-
-using namespace std;
-
-DWORD pid;
-HANDLE procHandle;
-IOBuf* ioBuf;
-
-// State flags indicating whether the attach to the remote process
-// definitively succeeded or failed
-volatile bool attachFailed    = false;
-volatile bool attachSucceeded = false;
-
-// State flag indicating whether the target process is suspended.
-// Modified by suspend()/resume(), viewed by debug thread, but only
-// under cover of the threads lock.
-volatile bool suspended       = false;
-
-// State flags indicating whether we are considered to be attached to
-// the target process and are therefore queuing up events to be sent
-// back to the debug server. These flags are only accessed and
-// modified under the cover of the eventLock.
-Monitor* eventLock;
-// The following is set to true when a client is attached to this process
-volatile bool generateDebugEvents = false;
-// Pointer to current debug event; non-NULL indicates a debug event is
-// waiting to be sent to the client. Main thread sets this to NULL to
-// indicate that the event has been consumed; also sets
-// passEventToClient, below.
-volatile DEBUG_EVENT* curDebugEvent = NULL;
-// Set by main thread to indicate whether the most recently posted
-// debug event should be passed on to the target process.
-volatile bool passEventToClient = true;
-
-void conditionalPostDebugEvent(DEBUG_EVENT* ev, DWORD* continueOrNotHandledFlag) {
-  // FIXME: make it possible for the client to enable and disable
-  // certain types of events (have to do so in a platform-independent
-  // manner)
-  switch (ev->dwDebugEventCode) {
-  case EXCEPTION_DEBUG_EVENT:
-    switch (ev->u.Exception.ExceptionRecord.ExceptionCode) {
-    case EXCEPTION_BREAKPOINT:  break;
-    case EXCEPTION_SINGLE_STEP: break;
-    case EXCEPTION_ACCESS_VIOLATION: break;
-    default: return;
-    }
-  }
-  eventLock->lock();
-  if (generateDebugEvents) {
-    curDebugEvent = ev;
-    while (curDebugEvent != NULL) {
-      eventLock->wait();
-    }
-    if (passEventToClient) {
-      *continueOrNotHandledFlag = DBG_EXCEPTION_NOT_HANDLED;
-    } else {
-      *continueOrNotHandledFlag = DBG_CONTINUE;
-    }
-  }
-  eventLock->unlock();
-}
-
-
-//----------------------------------------------------------------------
-// Module list
-//
-
-vector<LibInfo> libs;
-
-//----------------------------------------------------------------------
-// Thread list
-//
-
-struct ThreadInfo {
-  DWORD tid;
-  HANDLE thread;
-
-  ThreadInfo(DWORD tid, HANDLE thread) {
-    this->tid = tid;
-    this->thread = thread;
-  }
-};
-
-class ThreadList : public LockableList<ThreadInfo> {
-public:
-  bool removeByThreadID(DWORD tid) {
-    for (InternalListType::iterator iter = internalList.begin();
-         iter != internalList.end(); iter++) {
-      if ((*iter).tid == tid) {
-        internalList.erase(iter);
-        return true;
-      }
-    }
-    return false;
-  }
-  HANDLE threadIDToHandle(DWORD tid) {
-    for (InternalListType::iterator iter = internalList.begin();
-         iter != internalList.end(); iter++) {
-      if ((*iter).tid == tid) {
-        return (*iter).thread;
-      }
-    }
-    return NULL;
-  }
-};
-
-ThreadList threads;
-
-//----------------------------------------------------------------------
-// INITIALIZATION AND TERMINATION
-//
-
-void
-printError(const char* prefix) {
-  DWORD detail = GetLastError();
-  LPTSTR message;
-  FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
-                FORMAT_MESSAGE_FROM_SYSTEM,
-                0,
-                detail,
-                0,
-                (LPTSTR) &message,
-                1,
-                NULL);
-  // FIXME: This is signaling an error: "The handle is invalid." ?
-  // Do I have to do all of my WaitForDebugEvent calls from the same thread?
-  cerr << prefix << ": " << message << endl;
-  LocalFree(message);
-}
-
-void
-endProcess(bool waitForProcess = true) {
-  NT4::unloadNTDLL();
-  if (waitForProcess) {
-    // Though we're exiting because of an error, do not tear down the
-    // target process.
-    WaitForSingleObject(procHandle, INFINITE);
-  }
-  CloseHandle(procHandle);
-  exit(0);
-}
-
-DWORD WINAPI
-debugThreadEntry(void*) {
-#ifdef DEBUGGING
-  DWORD lastMsgId = 0;
-  int count = 0;
-#endif
-
-  if (!DebugActiveProcess(pid)) {
-    attachFailed = true;
-    return 0;
-  }
-
-  // Wait for debug events. We keep the information from some of these
-  // on the side in anticipation of later queries by the client. NOTE
-  // that we leave the process running. The main thread is responsible
-  // for suspending and resuming all currently-active threads upon
-  // client attach and detach.
-
-  while (true) {
-    DEBUG_EVENT ev;
-    if (!WaitForDebugEvent(&ev, INFINITE)) {
-#ifdef DEBUGGING
-      if (++count < 10) {
-        // FIXME: This is signaling an error: "The handle is invalid." ?
-        // Do I have to do all of my WaitForDebugEvent calls from the same thread?
-        printError("WaitForDebugEvent failed");
-      }
-#endif
-    } else {
-
-#ifdef DEBUGGING
-      if (ev.dwDebugEventCode != lastMsgId) {
-        lastMsgId = ev.dwDebugEventCode;
-        count = 0;
-        cerr << "Debug thread received event " << ev.dwDebugEventCode << endl;
-      } else {
-        if (++count < 10) {
-          cerr << "Debug thread received event " << ev.dwDebugEventCode << endl;
-        }
-      }
-#endif
-
-      DWORD dbgContinueMode = DBG_CONTINUE;
-
-      switch (ev.dwDebugEventCode) {
-      case LOAD_DLL_DEBUG_EVENT:
-        conditionalPostDebugEvent(&ev, &dbgContinueMode);
-        break;
-
-      case UNLOAD_DLL_DEBUG_EVENT:
-        conditionalPostDebugEvent(&ev, &dbgContinueMode);
-        break;
-
-      case CREATE_PROCESS_DEBUG_EVENT:
-        threads.lock();
-        // FIXME: will this deal properly with child processes? If
-        // not, is it possible to make it do so?
-#ifdef DEBUGGING
-        cerr << "CREATE_PROCESS_DEBUG_EVENT " << ev.dwThreadId
-             << " " << ev.u.CreateProcessInfo.hThread << endl;
-#endif
-        if (ev.u.CreateProcessInfo.hThread != NULL) {
-          threads.add(ThreadInfo(ev.dwThreadId, ev.u.CreateProcessInfo.hThread));
-        }
-        threads.unlock();
-        break;
-
-      case CREATE_THREAD_DEBUG_EVENT:
-        threads.lock();
-#ifdef DEBUGGING
-        cerr << "CREATE_THREAD_DEBUG_EVENT " << ev.dwThreadId
-             << " " << ev.u.CreateThread.hThread << endl;
-#endif
-        if (suspended) {
-          // Suspend this thread before adding it to the thread list
-          SuspendThread(ev.u.CreateThread.hThread);
-        }
-        threads.add(ThreadInfo(ev.dwThreadId, ev.u.CreateThread.hThread));
-        threads.unlock();
-        break;
-
-      case EXIT_THREAD_DEBUG_EVENT:
-        threads.lock();
-#ifdef DEBUGGING
-        cerr << "EXIT_THREAD_DEBUG_EVENT " << ev.dwThreadId << endl;
-#endif
-        threads.removeByThreadID(ev.dwThreadId);
-        threads.unlock();
-        break;
-
-      case EXCEPTION_DEBUG_EVENT:
-        //      cerr << "EXCEPTION_DEBUG_EVENT" << endl;
-        switch (ev.u.Exception.ExceptionRecord.ExceptionCode) {
-        case EXCEPTION_BREAKPOINT:
-          //        cerr << "EXCEPTION_BREAKPOINT" << endl;
-          if (!attachSucceeded && !attachFailed) {
-            attachSucceeded = true;
-          }
-          break;
-
-        default:
-          dbgContinueMode = DBG_EXCEPTION_NOT_HANDLED;
-          break;
-        }
-        conditionalPostDebugEvent(&ev, &dbgContinueMode);
-        break;
-
-      case EXIT_PROCESS_DEBUG_EVENT:
-        endProcess(false);
-        // NOT REACHED
-        break;
-
-      default:
-#ifdef DEBUGGING
-        cerr << "Received debug event " << ev.dwDebugEventCode << endl;
-#endif
-        break;
-      }
-
-      ContinueDebugEvent(ev.dwProcessId, ev.dwThreadId, dbgContinueMode);
-    }
-  }
-}
-
-bool
-attachToProcess() {
-  // Create event lock
-  eventLock = new Monitor();
-
-  // Get a process handle for later
-  procHandle = OpenProcess(PROCESS_ALL_ACCESS, FALSE, pid);
-  if (procHandle == NULL) {
-    return false;
-  }
-
-  // Start up the debug thread
-  DWORD debugThreadId;
-  if (CreateThread(NULL, 0, &debugThreadEntry, NULL, 0, &debugThreadId) == NULL) {
-    // Failed to make background debug thread. Fail.
-    return false;
-  }
-
-  while ((!attachSucceeded) && (!attachFailed)) {
-    Sleep(1);
-  }
-
-  if (attachFailed) {
-    return false;
-  }
-
-  assert(attachSucceeded);
-
-  return true;
-}
-
-bool
-readMessage(Message* msg) {
-  DWORD numRead;
-  if (!ReadFile(GetStdHandle(STD_INPUT_HANDLE),
-                msg,
-                sizeof(Message),
-                &numRead,
-                NULL)) {
-    return false;
-  }
-  if (numRead != sizeof(Message)) {
-    return false;
-  }
-  // For "poke" messages, must follow up by reading raw data
-  if (msg->type == Message::POKE) {
-    char* dataBuf = new char[msg->pokeArg.numBytes];
-    if (dataBuf == NULL) {
-      return false;
-    }
-    if (!ReadFile(GetStdHandle(STD_INPUT_HANDLE),
-                  dataBuf,
-                  msg->pokeArg.numBytes,
-                  &numRead,
-                  NULL)) {
-      delete[] dataBuf;
-      return false;
-    }
-    if (numRead != msg->pokeArg.numBytes) {
-      delete[] dataBuf;
-      return false;
-    }
-    msg->pokeArg.data = (void *) dataBuf;
-  }
-  return true;
-}
-
-void
-handlePeek(Message* msg) {
-#ifdef DEBUGGING
-  cerr << "Entering handlePeek()" << endl;
-#endif
-
-  char* memBuf = new char[msg->peekArg.numBytes];
-  if (memBuf == NULL) {
-    ioBuf->writeString("B");
-    ioBuf->writeBinChar(0);
-    ioBuf->flush();
-    delete[] memBuf;
-    return;
-  }
-
-  // Try fast case first
-  DWORD numRead;
-  BOOL res = ReadProcessMemory(procHandle,
-                               (LPCVOID) msg->peekArg.address,
-                               memBuf,
-                               msg->peekArg.numBytes,
-                               &numRead);
-  if (res && (numRead == msg->peekArg.numBytes)) {
-
-    // OK, complete success. Phew.
-#ifdef DEBUGGING
-    cerr << "Peek success case" << endl;
-#endif
-    ioBuf->writeString("B");
-    ioBuf->writeBinChar(1);
-    ioBuf->writeBinUnsignedInt(numRead);
-    ioBuf->writeBinChar(1);
-    ioBuf->writeBinBuf(memBuf, numRead);
-  } else {
-#ifdef DEBUGGING
-    cerr << "*** Peek slow case ***" << endl;
-#endif
-
-    ioBuf->writeString("B");
-    ioBuf->writeBinChar(1);
-
-    // Use VirtualQuery to speed things up a bit
-    DWORD numLeft = msg->peekArg.numBytes;
-    char* curAddr = (char*) msg->peekArg.address;
-    while (numLeft > 0) {
-      MEMORY_BASIC_INFORMATION memInfo;
-      VirtualQueryEx(procHandle, curAddr, &memInfo, sizeof(memInfo));
-      DWORD numToRead = memInfo.RegionSize;
-      if (numToRead > numLeft) {
-        numToRead = numLeft;
-      }
-      DWORD numRead;
-      if (memInfo.State == MEM_COMMIT) {
-        // Read the process memory at this address for this length
-        // FIXME: should check the result of this read
-        ReadProcessMemory(procHandle, curAddr, memBuf,
-                          numToRead, &numRead);
-        // Write this out
-#ifdef DEBUGGING
-        cerr << "*** Writing " << numToRead << " bytes as mapped ***" << endl;
-#endif
-        ioBuf->writeBinUnsignedInt(numToRead);
-        ioBuf->writeBinChar(1);
-        ioBuf->writeBinBuf(memBuf, numToRead);
-      } else {
-        // Indicate region is free
-#ifdef DEBUGGING
-        cerr << "*** Writing " << numToRead << " bytes as unmapped ***" << endl;
-#endif
-        ioBuf->writeBinUnsignedInt(numToRead);
-        ioBuf->writeBinChar(0);
-      }
-      curAddr += numToRead;
-      numLeft -= numToRead;
-    }
-  }
-
-  ioBuf->flush();
-  delete[] memBuf;
-#ifdef DEBUGGING
-  cerr << "Exiting handlePeek()" << endl;
-#endif
-}
-
-void
-handlePoke(Message* msg) {
-#ifdef DEBUGGING
-  cerr << "Entering handlePoke()" << endl;
-#endif
-  DWORD numWritten;
-  BOOL res = WriteProcessMemory(procHandle,
-                                (LPVOID) msg->pokeArg.address,
-                                msg->pokeArg.data,
-                                msg->pokeArg.numBytes,
-                                &numWritten);
-  if (res && (numWritten == msg->pokeArg.numBytes)) {
-    // Success
-    ioBuf->writeBoolAsInt(true);
-#ifdef DEBUGGING
-    cerr << " (Succeeded)" << endl;
-#endif
-  } else {
-    // Failure
-    ioBuf->writeBoolAsInt(false);
-#ifdef DEBUGGING
-    cerr << " (Failed)" << endl;
-#endif
-  }
-  ioBuf->writeEOL();
-  ioBuf->flush();
-  // We clean up the data
-  char* dataBuf = (char*) msg->pokeArg.data;
-  delete[] dataBuf;
-#ifdef DEBUGGING
-  cerr << "Exiting handlePoke()" << endl;
-#endif
-}
-
-bool
-suspend() {
-  if (suspended) {
-    return false;
-  }
-  // Before we suspend, we must take a snapshot of the loaded module
-  // names and base addresses, since acquiring this snapshot requires
-  // starting and exiting a thread in the remote process (at least on
-  // NT 4).
-  libs.clear();
-#ifdef DEBUGGING
-  cerr << "Starting suspension" << endl;
-#endif
-  libInfo(pid, libs);
-#ifdef DEBUGGING
-  cerr << "  Got lib info" << endl;
-#endif
-  threads.lock();
-#ifdef DEBUGGING
-  cerr << "  Got thread lock" << endl;
-#endif
-  suspended = true;
-  int j = 0;
-  for (int i = 0; i < threads.size(); i++) {
-    j++;
-    SuspendThread(threads.get(i).thread);
-  }
-#ifdef DEBUGGING
-  cerr << "Suspended " << j << " threads" << endl;
-#endif
-  threads.unlock();
-  return true;
-}
-
-bool
-resume() {
-  if (!suspended) {
-    return false;
-  }
-  threads.lock();
-  suspended = false;
-  for (int i = 0; i < threads.size(); i++) {
-    ResumeThread(threads.get(i).thread);
-  }
-  threads.unlock();
-#ifdef DEBUGGING
-  cerr << "Resumed process" << endl;
-#endif
-  return true;
-}
-
-int
-main(int argc, char **argv)
-{
-  if (argc != 2) {
-    // Should only be used by performing CreateProcess within SwDbgSrv
-    exit(1);
-  }
-
-  if (sscanf(argv[1], "%u", &pid) != 1) {
-    exit(1);
-  }
-
-  // Try to attach to process
-  if (!attachToProcess()) {
-    // Attach failed. Notify parent by writing result to stdout file
-    // handle.
-    char res = 0;
-    DWORD numBytes;
-    WriteFile(GetStdHandle(STD_OUTPUT_HANDLE), &res, sizeof(res),
-              &numBytes, NULL);
-    exit(1);
-  }
-
-  // Server is expecting success result back.
-  char res = 1;
-  DWORD numBytes;
-  WriteFile(GetStdHandle(STD_OUTPUT_HANDLE), &res, sizeof(res),
-            &numBytes, NULL);
-
-  // Initialize our I/O buffer
-  ioBuf = new IOBuf(32768, 131072);
-  ioBuf->setOutputFileHandle(GetStdHandle(STD_OUTPUT_HANDLE));
-
-  // At this point we are attached. Enter our main loop which services
-  // requests from the server. Note that in order to handle attach/
-  // detach properly (i.e., resumption of process upon "detach") we
-  // will need another thread which handles debug events.
-  while (true) {
-    // Read a message from the server
-    Message msg;
-    if (!readMessage(&msg)) {
-      endProcess();
-    }
-
-#ifdef DEBUGGING
-    cerr << "Main thread read message: " << msg.type << endl;
-#endif
-
-    switch (msg.type) {
-    // ATTACH and DETACH messages MUST come in pairs
-    case Message::ATTACH:
-      suspend();
-      eventLock->lock();
-      generateDebugEvents = true;
-      eventLock->unlock();
-      break;
-
-    case Message::DETACH:
-      eventLock->lock();
-      generateDebugEvents = false;
-      // Flush remaining event if any
-      if (curDebugEvent != NULL) {
-        curDebugEvent = NULL;
-        eventLock->notifyAll();
-      }
-      eventLock->unlock();
-      resume();
-      break;
-
-    case Message::LIBINFO:
-      {
-        if (!suspended) {
-          ioBuf->writeInt(0);
-        } else {
-          // Send back formatted text
-          ioBuf->writeInt(libs.size());
-          for (int i = 0; i < libs.size(); i++) {
-            ioBuf->writeSpace();
-            ioBuf->writeInt(1);
-            ioBuf->writeSpace();
-            ioBuf->writeInt(libs[i].name.size());
-            ioBuf->writeSpace();
-            ioBuf->writeString(libs[i].name.c_str());
-            ioBuf->writeSpace();
-            ioBuf->writeAddress(libs[i].base);
-          }
-        }
-        ioBuf->writeEOL();
-        ioBuf->flush();
-        break;
-      }
-
-    case Message::PEEK:
-      handlePeek(&msg);
-      break;
-
-    case Message::POKE:
-      handlePoke(&msg);
-      break;
-
-    case Message::THREADLIST:
-      {
-        if (!suspended) {
-          ioBuf->writeInt(0);
-        } else {
-          threads.lock();
-          ioBuf->writeInt(threads.size());
-          for (int i = 0; i < threads.size(); i++) {
-            ioBuf->writeSpace();
-            ioBuf->writeAddress((void*) threads.get(i).thread);
-          }
-          threads.unlock();
-        }
-        ioBuf->writeEOL();
-        ioBuf->flush();
-        break;
-      }
-
-    case Message::DUPHANDLE:
-      {
-        HANDLE dup;
-        if (DuplicateHandle(procHandle,
-                            msg.handleArg.handle,
-                            GetCurrentProcess(),
-                            &dup,
-                            0,
-                            FALSE,
-                            DUPLICATE_SAME_ACCESS)) {
-          ioBuf->writeBoolAsInt(true);
-          ioBuf->writeSpace();
-          ioBuf->writeAddress((void*) dup);
-        } else {
-          ioBuf->writeBoolAsInt(false);
-        }
-        ioBuf->writeEOL();
-        ioBuf->flush();
-        break;
-      }
-
-    case Message::CLOSEHANDLE:
-      {
-        CloseHandle(msg.handleArg.handle);
-        break;
-      }
-
-    case Message::GETCONTEXT:
-      {
-        if (!suspended) {
-          ioBuf->writeBoolAsInt(false);
-        } else {
-          CONTEXT context;
-          context.ContextFlags = CONTEXT_FULL | CONTEXT_DEBUG_REGISTERS;
-          if (GetThreadContext(msg.handleArg.handle, &context)) {
-            ioBuf->writeBoolAsInt(true);
-            // EAX, EBX, ECX, EDX, ESI, EDI, EBP, ESP, EIP, DS, ES, FS, GS,
-            // CS, SS, EFLAGS, DR0, DR1, DR2, DR3, DR6, DR7
-            // See README-commands.txt
-            ioBuf->writeSpace(); ioBuf->writeAddress((void*) context.Eax);
-            ioBuf->writeSpace(); ioBuf->writeAddress((void*) context.Ebx);
-            ioBuf->writeSpace(); ioBuf->writeAddress((void*) context.Ecx);
-            ioBuf->writeSpace(); ioBuf->writeAddress((void*) context.Edx);
-            ioBuf->writeSpace(); ioBuf->writeAddress((void*) context.Esi);
-            ioBuf->writeSpace(); ioBuf->writeAddress((void*) context.Edi);
-            ioBuf->writeSpace(); ioBuf->writeAddress((void*) context.Ebp);
-            ioBuf->writeSpace(); ioBuf->writeAddress((void*) context.Esp);
-            ioBuf->writeSpace(); ioBuf->writeAddress((void*) context.Eip);
-            ioBuf->writeSpace(); ioBuf->writeAddress((void*) context.SegDs);
-            ioBuf->writeSpace(); ioBuf->writeAddress((void*) context.SegEs);
-            ioBuf->writeSpace(); ioBuf->writeAddress((void*) context.SegFs);
-            ioBuf->writeSpace(); ioBuf->writeAddress((void*) context.SegGs);
-            ioBuf->writeSpace(); ioBuf->writeAddress((void*) context.SegCs);
-            ioBuf->writeSpace(); ioBuf->writeAddress((void*) context.SegSs);
-            ioBuf->writeSpace(); ioBuf->writeAddress((void*) context.EFlags);
-            ioBuf->writeSpace(); ioBuf->writeAddress((void*) context.Dr0);
-            ioBuf->writeSpace(); ioBuf->writeAddress((void*) context.Dr1);
-            ioBuf->writeSpace(); ioBuf->writeAddress((void*) context.Dr2);
-            ioBuf->writeSpace(); ioBuf->writeAddress((void*) context.Dr3);
-            ioBuf->writeSpace(); ioBuf->writeAddress((void*) context.Dr6);
-            ioBuf->writeSpace(); ioBuf->writeAddress((void*) context.Dr7);
-          } else {
-            ioBuf->writeBoolAsInt(false);
-          }
-        }
-        ioBuf->writeEOL();
-        ioBuf->flush();
-        break;
-      }
-
-    case Message::SETCONTEXT:
-      {
-        if (!suspended) {
-          ioBuf->writeBoolAsInt(false);
-        } else {
-          CONTEXT context;
-          context.ContextFlags = CONTEXT_FULL | CONTEXT_DEBUG_REGISTERS;
-          context.Eax    = msg.setContextArg.Eax;
-          context.Ebx    = msg.setContextArg.Ebx;
-          context.Ecx    = msg.setContextArg.Ecx;
-          context.Edx    = msg.setContextArg.Edx;
-          context.Esi    = msg.setContextArg.Esi;
-          context.Edi    = msg.setContextArg.Edi;
-          context.Ebp    = msg.setContextArg.Ebp;
-          context.Esp    = msg.setContextArg.Esp;
-          context.Eip    = msg.setContextArg.Eip;
-          context.SegDs  = msg.setContextArg.Ds;
-          context.SegEs  = msg.setContextArg.Es;
-          context.SegFs  = msg.setContextArg.Fs;
-          context.SegGs  = msg.setContextArg.Gs;
-          context.SegCs  = msg.setContextArg.Cs;
-          context.SegSs  = msg.setContextArg.Ss;
-          context.EFlags = msg.setContextArg.EFlags;
-          context.Dr0    = msg.setContextArg.Dr0;
-          context.Dr1    = msg.setContextArg.Dr1;
-          context.Dr2    = msg.setContextArg.Dr2;
-          context.Dr3    = msg.setContextArg.Dr3;
-          context.Dr6    = msg.setContextArg.Dr6;
-          context.Dr7    = msg.setContextArg.Dr7;
-          if (SetThreadContext(msg.setContextArg.handle, &context)) {
-            ioBuf->writeBoolAsInt(true);
-          } else {
-            ioBuf->writeBoolAsInt(false);
-          }
-        }
-        ioBuf->writeEOL();
-        ioBuf->flush();
-        break;
-      }
-
-    case Message::SELECTORENTRY:
-      {
-        LDT_ENTRY entry;
-
-        if (GetThreadSelectorEntry(msg.selectorArg.handle,
-                                   msg.selectorArg.selector,
-                                   &entry)) {
-          ioBuf->writeBoolAsInt(true);
-          ioBuf->writeSpace(); ioBuf->writeAddress((void*) entry.LimitLow);
-          ioBuf->writeSpace(); ioBuf->writeAddress((void*) entry.BaseLow);
-          ioBuf->writeSpace(); ioBuf->writeAddress((void*) entry.HighWord.Bytes.BaseMid);
-          ioBuf->writeSpace(); ioBuf->writeAddress((void*) entry.HighWord.Bytes.Flags1);
-          ioBuf->writeSpace(); ioBuf->writeAddress((void*) entry.HighWord.Bytes.Flags2);
-          ioBuf->writeSpace(); ioBuf->writeAddress((void*) entry.HighWord.Bytes.BaseHi);
-        } else {
-          ioBuf->writeBoolAsInt(false);
-        }
-
-        ioBuf->writeEOL();
-        ioBuf->flush();
-        break;
-      }
-
-    case Message::SUSPEND:
-      suspend();
-      break;
-
-    case Message::RESUME:
-      resume();
-      break;
-
-    case Message::POLLEVENT:
-      eventLock->lock();
-      if (curDebugEvent == NULL) {
-        ioBuf->writeBoolAsInt(false);
-      } else {
-        ioBuf->writeBoolAsInt(true);
-        ioBuf->writeSpace();
-        threads.lock();
-        ioBuf->writeAddress((void*) threads.threadIDToHandle(curDebugEvent->dwThreadId));
-        threads.unlock();
-        ioBuf->writeSpace();
-        ioBuf->writeUnsignedInt(curDebugEvent->dwDebugEventCode);
-        // Figure out what else to write
-        switch (curDebugEvent->dwDebugEventCode) {
-        case LOAD_DLL_DEBUG_EVENT:
-          ioBuf->writeSpace();
-          ioBuf->writeAddress(curDebugEvent->u.LoadDll.lpBaseOfDll);
-          break;
-
-        case UNLOAD_DLL_DEBUG_EVENT:
-          ioBuf->writeSpace();
-          ioBuf->writeAddress(curDebugEvent->u.UnloadDll.lpBaseOfDll);
-          break;
-
-        case EXCEPTION_DEBUG_EVENT:
-          {
-            DWORD code = curDebugEvent->u.Exception.ExceptionRecord.ExceptionCode;
-            ioBuf->writeSpace();
-            ioBuf->writeUnsignedInt(code);
-            ioBuf->writeSpace();
-            ioBuf->writeAddress(curDebugEvent->u.Exception.ExceptionRecord.ExceptionAddress);
-            switch (curDebugEvent->u.Exception.ExceptionRecord.ExceptionCode) {
-            case EXCEPTION_ACCESS_VIOLATION:
-              ioBuf->writeSpace();
-              ioBuf->writeBoolAsInt(curDebugEvent->u.Exception.ExceptionRecord.ExceptionInformation[0] != 0);
-              ioBuf->writeSpace();
-              ioBuf->writeAddress((void*) curDebugEvent->u.Exception.ExceptionRecord.ExceptionInformation[1]);
-              break;
-
-            default:
-              break;
-            }
-            break;
-          }
-
-        default:
-          break;
-        }
-      }
-      eventLock->unlock();
-      ioBuf->writeEOL();
-      ioBuf->flush();
-      break;
-
-    case Message::CONTINUEEVENT:
-      eventLock->lock();
-      if (curDebugEvent == NULL) {
-        ioBuf->writeBoolAsInt(false);
-      } else {
-        curDebugEvent = NULL;
-        passEventToClient = msg.boolArg.val;
-        ioBuf->writeBoolAsInt(true);
-        eventLock->notify();
-      }
-      eventLock->unlock();
-      ioBuf->writeEOL();
-      ioBuf->flush();
-      break;
-    }
-  }
-
-  endProcess();
-
-  // NOT REACHED
-  return 0;
-}
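
The heart of SwDbgSub.cpp above is the handshake between the debug thread (conditionalPostDebugEvent) and the main thread (the POLLEVENT and CONTINUEEVENT cases): the debug thread publishes curDebugEvent and blocks on eventLock until the main thread clears it and records passEventToClient. A simplified standalone sketch of that handshake, with std::mutex and std::condition_variable standing in for the original Monitor class, might look like this:

#include <condition_variable>
#include <mutex>

// Simplified sketch of the event handshake used above; the Monitor class
// from the deleted sources is replaced by standard library primitives.
struct EventChannel {
  std::mutex lock;
  std::condition_variable cv;
  const void* curEvent = nullptr;   // stands in for curDebugEvent
  bool passToClient = true;         // stands in for passEventToClient

  // Debug thread: publish an event, block until the main thread consumes
  // it, then report whether the event should be passed on to the debuggee.
  bool post(const void* ev) {
    std::unique_lock<std::mutex> guard(lock);
    curEvent = ev;
    cv.wait(guard, [this] { return curEvent == nullptr; });
    return passToClient;
  }

  // Main thread (CONTINUEEVENT): consume the pending event, record the
  // client's decision, and wake the debug thread.
  bool consume(bool pass) {
    std::unique_lock<std::mutex> guard(lock);
    if (curEvent == nullptr) {
      return false;                 // nothing pending, mirrors the false reply
    }
    passToClient = pass;
    curEvent = nullptr;
    cv.notify_all();
    return true;
  }
};
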
--- a/agent/src/os/win32/SwDbgSub.dsp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,130 +0,0 @@
-# Microsoft Developer Studio Project File - Name="SwDbgSub" - Package Owner=<4>
-# Microsoft Developer Studio Generated Build File, Format Version 6.00
-# ** DO NOT EDIT **
-
-# TARGTYPE "Win32 (x86) Console Application" 0x0103
-
-CFG=SwDbgSub - Win32 Debug
-!MESSAGE This is not a valid makefile. To build this project using NMAKE,
-!MESSAGE use the Export Makefile command and run
-!MESSAGE 
-!MESSAGE NMAKE /f "SwDbgSub.mak".
-!MESSAGE 
-!MESSAGE You can specify a configuration when running NMAKE
-!MESSAGE by defining the macro CFG on the command line. For example:
-!MESSAGE 
-!MESSAGE NMAKE /f "SwDbgSub.mak" CFG="SwDbgSub - Win32 Debug"
-!MESSAGE 
-!MESSAGE Possible choices for configuration are:
-!MESSAGE 
-!MESSAGE "SwDbgSub - Win32 Release" (based on "Win32 (x86) Console Application")
-!MESSAGE "SwDbgSub - Win32 Debug" (based on "Win32 (x86) Console Application")
-!MESSAGE 
-
-# Begin Project
-# PROP AllowPerConfigDependencies 0
-# PROP Scc_ProjName ""
-# PROP Scc_LocalPath ""
-CPP=cl.exe
-RSC=rc.exe
-
-!IF  "$(CFG)" == "SwDbgSub - Win32 Release"
-
-# PROP BASE Use_MFC 0
-# PROP BASE Use_Debug_Libraries 0
-# PROP BASE Output_Dir "Release"
-# PROP BASE Intermediate_Dir "Release"
-# PROP BASE Target_Dir ""
-# PROP Use_MFC 0
-# PROP Use_Debug_Libraries 0
-# PROP Output_Dir "Release"
-# PROP Intermediate_Dir "Release"
-# PROP Ignore_Export_Lib 0
-# PROP Target_Dir ""
-# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
-# ADD CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
-# ADD BASE RSC /l 0x409 /d "NDEBUG"
-# ADD RSC /l 0x409 /d "NDEBUG"
-BSC32=bscmake.exe
-# ADD BASE BSC32 /nologo
-# ADD BSC32 /nologo
-LINK32=link.exe
-# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
-# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib ws2_32.lib /nologo /subsystem:console /machine:I386
-
-!ELSEIF  "$(CFG)" == "SwDbgSub - Win32 Debug"
-
-# PROP BASE Use_MFC 0
-# PROP BASE Use_Debug_Libraries 1
-# PROP BASE Output_Dir "SwDbgSub___Win32_Debug"
-# PROP BASE Intermediate_Dir "SwDbgSub___Win32_Debug"
-# PROP BASE Target_Dir ""
-# PROP Use_MFC 0
-# PROP Use_Debug_Libraries 1
-# PROP Output_Dir "Debug"
-# PROP Intermediate_Dir "Debug"
-# PROP Ignore_Export_Lib 0
-# PROP Target_Dir ""
-# ADD BASE CPP /nologo /W3 /Gm /GX /ZI /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /GZ /c
-# ADD CPP /nologo /W3 /Gm /GX /ZI /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /GZ /c
-# ADD BASE RSC /l 0x409 /d "_DEBUG"
-# ADD RSC /l 0x409 /d "_DEBUG"
-BSC32=bscmake.exe
-# ADD BASE BSC32 /nologo
-# ADD BSC32 /nologo
-LINK32=link.exe
-# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
-# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib ws2_32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
-
-!ENDIF 
-
-# Begin Target
-
-# Name "SwDbgSub - Win32 Release"
-# Name "SwDbgSub - Win32 Debug"
-# Begin Group "Source Files"
-
-# PROP Default_Filter "cpp;c;cxx;rc;def;r;odl;idl;hpj;bat"
-# Begin Source File
-
-SOURCE=.\Buffer.cpp
-# End Source File
-# Begin Source File
-
-SOURCE=.\IOBuf.cpp
-# End Source File
-# Begin Source File
-
-SOURCE=.\isNT4.cpp
-# End Source File
-# Begin Source File
-
-SOURCE=.\libInfo.cpp
-# End Source File
-# Begin Source File
-
-SOURCE=.\Monitor.cpp
-# End Source File
-# Begin Source File
-
-SOURCE=.\nt4internals.cpp
-# End Source File
-# Begin Source File
-
-SOURCE=.\SwDbgSub.cpp
-# End Source File
-# Begin Source File
-
-SOURCE=.\toolHelp.cpp
-# End Source File
-# End Group
-# Begin Group "Header Files"
-
-# PROP Default_Filter "h;hpp;hxx;hm;inl"
-# End Group
-# Begin Group "Resource Files"
-
-# PROP Default_Filter "ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe"
-# End Group
-# End Target
-# End Project
--- a/agent/src/os/win32/initWinsock.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include <iostream>
-#include <winsock2.h>
-
-using namespace std;
-
-void
-initWinsock()
-{
-  static int initted = 0;
-  WORD wVersionRequested;
-  WSADATA wsaData;
-  int err;
-
-  if (!initted) {
-    wVersionRequested = MAKEWORD( 2, 0 );
-
-    err = WSAStartup( wVersionRequested, &wsaData );
-    if ( err != 0 ) {
-      {
-        /* Tell the user that we couldn't find a usable */
-        /* WinSock DLL.                                 */
-        cerr << "SocketBase::SocketBase: unable to find usable "
-             << "WinSock DLL" << endl;
-        exit(1);
-      }
-    }
-
-    /* Confirm that the WinSock DLL supports 2.0.*/
-    /* Note that if the DLL supports versions greater    */
-    /* than 2.0 in addition to 2.0, it will still return */
-    /* 2.0 in wVersion since that is the version we      */
-    /* requested.                                        */
-
-    if ( LOBYTE( wsaData.wVersion ) != 2 ||
-         HIBYTE( wsaData.wVersion ) != 0 ) {
-      /* Tell the user that we couldn't find a usable */
-      /* WinSock DLL.                                  */
-      {
-        cerr << "Unable to find suitable version of WinSock DLL" << endl;
-        WSACleanup( );
-        exit(1);
-      }
-    }
-
-    initted = 1;
-  }
-}
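
initWinsock() above is guarded by the static initted flag, so repeated calls are harmless; it only needs to run once before any Winsock API is used (SwDbgSrv.cpp calls it at the top of main). A minimal usage sketch, assuming the program is linked against ws2_32.lib:

#include <winsock2.h>
#include "initWinsock.hpp"

int main() {
  initWinsock();                    // safe to call more than once
  SOCKET s = socket(AF_INET, SOCK_STREAM, 0);
  if (s != INVALID_SOCKET) {
    closesocket(s);
  }
  WSACleanup();
  return 0;
}
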
--- a/agent/src/os/win32/initWinsock.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,30 +0,0 @@
-/*
- * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef _INIT_WINSOCK_
-#define _INIT_WINSOCK_
-
-void initWinsock();
-
-#endif // #defined _INIT_WINSOCK_
--- a/agent/src/os/win32/ioUtils.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,156 +0,0 @@
-/*
- * Copyright (c) 2000, 2001, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include <ctype.h>
-#include <string.h>
-#include "ioUtils.hpp"
-#include "IOBuf.hpp"
-
-bool
-scanInt(char** data, int* num) {
-  *num = 0;
-
-  // Skip whitespace
-  while ((**data != 0) && (isspace(**data))) {
-    ++*data;
-  }
-
-  if (**data == 0) {
-    return false;
-  }
-
-  while ((**data != 0) && (!isspace(**data))) {
-    char cur = **data;
-    if ((cur < '0') || (cur > '9')) {
-      return false;
-    }
-    *num *= 10;
-    *num += cur - '0';
-    ++*data;
-  }
-
-  return true;
-}
-
-bool
-scanUnsignedLong(char** data, unsigned long* num) {
-  *num = 0;
-
-  // Skip whitespace
-  while ((**data != 0) && (isspace(**data))) {
-    ++*data;
-  }
-
-  if (**data == 0) {
-    return false;
-  }
-
-  while ((**data != 0) && (!isspace(**data))) {
-    char cur = **data;
-    if ((cur < '0') || (cur > '9')) {
-      return false;
-    }
-    *num *= 10;
-    *num += cur - '0';
-    ++*data;
-  }
-
-  return true;
-}
-
-bool
-charToNibble(char ascii, int* value) {
-  if (ascii >= '0' && ascii <= '9') {
-    *value = ascii - '0';
-    return true;
-  } else if (ascii >= 'A' && ascii <= 'F') {
-    *value = 10 + ascii - 'A';
-    return true;
-  } else if (ascii >= 'a' && ascii <= 'f') {
-    *value = 10 + ascii - 'a';
-    return true;
-  }
-
-  return false;
-}
-
-bool
-scanAddress(char** data, unsigned long* addr) {
-  *addr = 0;
-
-  // Skip whitespace
-  while ((**data != 0) && (isspace(**data))) {
-    ++*data;
-  }
-
-  if (**data == 0) {
-    return false;
-  }
-
-  if (strncmp(*data, "0x", 2) != 0) {
-    return false;
-  }
-
-  *data += 2;
-
-  while ((**data != 0) && (!isspace(**data))) {
-    int val;
-    bool res = charToNibble(**data, &val);
-    if (!res) {
-      return false;
-    }
-    *addr <<= 4;
-    *addr |= val;
-    ++*data;
-  }
-
-  return true;
-}
-
-bool
-scanAndSkipBinEscapeChar(char** data) {
-  // Skip whitespace
-  while ((**data != 0) && (isspace(**data))) {
-    ++*data;
-  }
-
-  if (!IOBuf::isBinEscapeChar(**data)) {
-    return false;
-  }
-
-  ++*data;
-
-  return true;
-}
-
-bool
-scanBinUnsignedLong(char** data, unsigned long* num) {
-  *num = 0;
-  for (int i = 0; i < 4; i++) {
-    unsigned char val = (unsigned char) **data;
-    *num = (*num << 8) | val;
-    ++*data;
-  }
-  return true;
-}
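
Each scan* routine above skips leading whitespace, consumes a single token and advances *data past it, so an argument string can be parsed left to right with successive calls. A small usage sketch, compiled together with ioUtils.cpp; the sample input line is made up:

#include <cstdio>
#include "ioUtils.hpp"

int main() {
  char line[] = "0x7ffd1000 42";    // an address token, then a decimal count
  char* p = line;
  unsigned long addr;
  int count;
  // scanAddress() insists on the "0x" prefix; scanInt() accepts decimal only.
  if (scanAddress(&p, &addr) && scanInt(&p, &count)) {
    std::printf("addr=0x%lx count=%d\n", addr, count);
  }
  return 0;
}
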
--- a/agent/src/os/win32/ioUtils.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2000, 2001, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef _IO_UTILS_
-#define _IO_UTILS_
-
-bool scanInt(char** data, int* num);
-bool scanUnsignedLong(char** data, unsigned long* num);
-bool scanAddress(char** data, unsigned long* addr);
-
-// Binary utils (for poke)
-bool scanAndSkipBinEscapeChar(char** data);
-bool scanBinUnsignedLong(char** data, unsigned long* num);
-
-#endif  // #defined _IO_UTILS_
--- a/agent/src/os/win32/isNT4.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "isNT4.hpp"
-#include <windows.h>
-
-bool
-isNT4() {
-  OSVERSIONINFO info;
-  info.dwOSVersionInfoSize = sizeof(info);
-
-  if (!GetVersionEx(&info)) {
-    return false;
-  }
-
-  return ((info.dwPlatformId == VER_PLATFORM_WIN32_NT) &&
-          (info.dwMajorVersion == 4));
-}
--- a/agent/src/os/win32/isNT4.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef _ISNT4_H_
-#define _ISNT4_H_
-
-// We need to special-case the Windows NT 4.0 implementations of some
-// of the debugging routines because the Tool Help API is not
-// available on this platform.
-
-bool isNT4();
-
-#endif  // #defined _ISNT4_H_
--- a/agent/src/os/win32/libInfo.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,186 +0,0 @@
-/*
- * Copyright (c) 2001, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-// Disable too-long symbol warnings
-#pragma warning ( disable : 4786 )
-
-#include "libInfo.hpp"
-#include "nt4internals.hpp"
-#include "isNT4.hpp"
-#include "toolHelp.hpp"
-#include <assert.h>
-
-using namespace std;
-
-typedef void LibInfoImplFunc(DWORD pid, vector<LibInfo>& info);
-
-static void libInfoImplNT4(DWORD pid, vector<LibInfo>& info);
-static void libInfoImplToolHelp(DWORD pid, vector<LibInfo>& info);
-
-void
-libInfo(DWORD pid, vector<LibInfo>& info) {
-  static LibInfoImplFunc* impl = NULL;
-
-  if (impl == NULL) {
-    // See which operating system we're on
-    impl = (isNT4() ? &libInfoImplNT4 : &libInfoImplToolHelp);
-  }
-
-  assert(impl != NULL);
-
-  (*impl)(pid, info);
-}
-
-static ULONG
-ModuleCount(NT4::PDEBUG_BUFFER db) {
-  return db->ModuleInformation ? *PULONG(db->ModuleInformation) : 0;
-}
-
-#define MAX2(a, b) (((a) < (b)) ? (b) : (a))
-
-static void
-libInfoImplNT4(DWORD pid, vector<LibInfo>& info) {
-  static EnumProcessModulesFunc*   enumFunc = NULL;
-  static GetModuleFileNameExFunc*  fnFunc   = NULL;
-  static GetModuleInformationFunc* infoFunc = NULL;
-
-  if (enumFunc == NULL) {
-    HMODULE dll = loadPSAPIDLL();
-
-    enumFunc = (EnumProcessModulesFunc*)   GetProcAddress(dll, "EnumProcessModules");
-    fnFunc   = (GetModuleFileNameExFunc*)  GetProcAddress(dll, "GetModuleFileNameExA");
-    infoFunc = (GetModuleInformationFunc*) GetProcAddress(dll, "GetModuleInformation");
-
-    assert(enumFunc != NULL);
-    assert(fnFunc   != NULL);
-    assert(infoFunc != NULL);
-  }
-
-  static HMODULE* mods = new HMODULE[256];
-  static int      numMods = 256;
-
-  if (mods == NULL) {
-    mods = new HMODULE[numMods];
-    if (mods == NULL) {
-      return;
-    }
-  }
-
-  bool done = false;
-
-  HANDLE proc = OpenProcess(PROCESS_ALL_ACCESS, FALSE, pid);
-  if (proc == NULL) {
-    return;
-  }
-
-  do {
-    DWORD bufSize = numMods * sizeof(HMODULE);
-    DWORD neededSize;
-
-    if (!(*enumFunc)(proc, mods, bufSize, &neededSize)) {
-      // Enum failed
-      CloseHandle(proc);
-      return;
-    }
-
-    int numFetched = neededSize / sizeof(HMODULE);
-
-    if (numMods < numFetched) {
-      // Grow buffer
-      numMods = MAX2(numFetched, 2 * numMods);
-      delete[] mods;
-      mods = new HMODULE[numMods];
-      if (mods == NULL) {
-        CloseHandle(proc);
-        return;
-      }
-    } else {
-      char filename[MAX_PATH];
-      MODULEINFO modInfo;
-
-      // Iterate through and fetch each one's info
-      for (int i = 0; i < numFetched; i++) {
-        if (!(*fnFunc)(proc, mods[i], filename, MAX_PATH)) {
-          CloseHandle(proc);
-          return;
-        }
-
-        if (!(*infoFunc)(proc, mods[i], &modInfo, sizeof(MODULEINFO))) {
-          CloseHandle(proc);
-          return;
-        }
-
-        info.push_back(LibInfo(string(filename), (void*) modInfo.lpBaseOfDll));
-      }
-
-      done = true;
-    }
-  } while (!done);
-
-  CloseHandle(proc);
-  return;
-}
-
-void
-libInfoImplToolHelp(DWORD pid, vector<LibInfo>& info) {
-  using namespace ToolHelp;
-
-  static CreateToolhelp32SnapshotFunc* snapshotFunc = NULL;
-  static Module32FirstFunc*            firstFunc    = NULL;
-  static Module32NextFunc*             nextFunc     = NULL;
-
-  if (snapshotFunc == NULL) {
-    HMODULE dll = loadDLL();
-
-    snapshotFunc =
-      (CreateToolhelp32SnapshotFunc*) GetProcAddress(dll,
-                                                     "CreateToolhelp32Snapshot");
-
-    firstFunc = (Module32FirstFunc*) GetProcAddress(dll,
-                                                    "Module32First");
-
-    nextFunc = (Module32NextFunc*) GetProcAddress(dll,
-                                                  "Module32Next");
-
-    assert(snapshotFunc != NULL);
-    assert(firstFunc    != NULL);
-    assert(nextFunc     != NULL);
-  }
-
-  HANDLE snapshot = (*snapshotFunc)(TH32CS_SNAPMODULE, pid);
-  if (snapshot == (HANDLE) -1) {
-    // Error occurred during snapshot
-    return;
-  }
-
-  // Iterate
-  MODULEENTRY32 module;
-  if ((*firstFunc)(snapshot, &module)) {
-    do {
-      info.push_back(LibInfo(string(module.szExePath), (void*) module.modBaseAddr));
-    } while ((*nextFunc)(snapshot, &module));
-  }
-
-  CloseHandle(snapshot);
-}
--- a/agent/src/os/win32/libInfo.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2001, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef _LIBINFO_
-#define _LIBINFO_
-
-#include <vector>
-#include <string>
-#include <windows.h>
-
-struct LibInfo {
-  std::string name;
-  void*  base;
-
-  LibInfo(const std::string& name, void* base) {
-    this->name = name;
-    this->base = base;
-  }
-};
-
-void libInfo(DWORD pid, std::vector<LibInfo>& info);
-
-#endif  // #defined _LIBINFO_
--- a/agent/src/os/win32/nt4internals.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2000, 2001, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "nt4internals.hpp"
-#include <stdio.h>
-#include <stdlib.h>
-#include <assert.h>
-
-namespace NT4 {
-
-static HMODULE ntDLL = NULL;
-
-HMODULE loadNTDLL() {
-  if (ntDLL == NULL) {
-    ntDLL = LoadLibrary("NTDLL.DLL");
-  }
-
-  assert(ntDLL != NULL);
-  return ntDLL;
-}
-
-void unloadNTDLL() {
-  if (ntDLL != NULL) {
-    FreeLibrary(ntDLL);
-    ntDLL = NULL;
-  }
-}
-
-} // namespace NT4
-
-static HMODULE psapiDLL = NULL;
-
-HMODULE
-loadPSAPIDLL() {
-  if (psapiDLL == NULL) {
-    psapiDLL = LoadLibrary("PSAPI.DLL");
-  }
-
-  if (psapiDLL == NULL) {
-    fprintf(stderr, "Simple Windows Debug Server requires PSAPI.DLL on Windows NT 4.0.\n");
-    fprintf(stderr, "Please install this DLL from the SDK and restart the server.\n");
-    exit(1);
-  }
-
-  return psapiDLL;
-}
-
-void
-unloadPSAPIDLL() {
-  if (psapiDLL != NULL) {
-    FreeLibrary(psapiDLL);
-    psapiDLL = NULL;
-  }
-}
--- a/agent/src/os/win32/nt4internals.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,273 +0,0 @@
-/*
- * Copyright (c) 2000, 2001, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef _NT4INTERNALS_H_
-#define _NT4INTERNALS_H_
-
-#include <windows.h>
-
-namespace NT4 {
-extern "C" {
-
-// Data structures and constants required to be able to get necessary
-// debugging-related information on Windows NT 4.0 through internal
-// (i.e., non-public) APIs. These are adapted from those in the
-// _Windows NT/2000 Native API Reference_ by Gary Nebbett, Macmillan
-// Technical Publishing, 201 West 103rd Street, Indianapolis, IN
-// 46290, 2000.
-
-typedef LONG NTSTATUS;
-typedef LONG KPRIORITY;
-
-#if (_MSC_VER >= 800) || defined(_STDCALL_SUPPORTED)
-#define NTAPI __stdcall
-#else
-#define _cdecl
-#define NTAPI
-#endif
-
-#define STATUS_INFO_LENGTH_MISMATCH ((NTSTATUS)0xC0000004L)
-
-typedef enum _SYSTEM_INFORMATION_CLASS {
-  SystemProcessesAndThreadsInformation = 5
-} SYSTEM_INFORMATION_CLASS;
-
-typedef struct _UNICODE_STRING {
-  USHORT Length;
-  USHORT MaximumLength;
-  PWSTR  Buffer;
-} UNICODE_STRING;
-
-typedef struct _VM_COUNTERS {
-  ULONG PeakVirtualSize;
-  ULONG VirtualSize;
-  ULONG PageFaultCount;
-  ULONG PeakWorkingSetSize;
-  ULONG WorkingSetSize;
-  ULONG QuotaPeakPagedPoolUsage;
-  ULONG QuotaPagedPoolUsage;
-  ULONG QuotaPeakNonPagedPoolUsage;
-  ULONG QuotaNonPagedPoolUsage;
-  ULONG PagefileUsage;
-  ULONG PeakPagefileUsage;
-} VM_COUNTERS, *PVM_COUNTERS;
-
-typedef struct _IO_COUNTERS {
-  LARGE_INTEGER ReadOperationCount;
-  LARGE_INTEGER WriteOperationCount;
-  LARGE_INTEGER OtherOperationCount;
-  LARGE_INTEGER ReadTransferCount;
-  LARGE_INTEGER WriteTransferCount;
-  LARGE_INTEGER OtherTransferCount;
-} IO_COUNTERS, *PIO_COUNTERS;
-
-typedef struct _CLIENT_ID {
-  HANDLE UniqueProcess;
-  HANDLE UniqueThread;
-} CLIENT_ID, *PCLIENT_ID;
-
-typedef enum {
-  StateInitialized,
-  StateReady,
-  StateRunning,
-  StateStandby,
-  StateTerminated,
-  StateWait,
-  StateTransition,
-  StateUnknown
-} THREAD_STATE;
-
-typedef enum {
-  Executive,
-  FreePage,
-  PageIn,
-  PoolAllocation,
-  DelayExecution,
-  Suspended,
-  UserRequest,
-  WrExecutive,
-  WrFreePage,
-  WrPageIn,
-  WrPoolAllocation,
-  WrDelayExecution,
-  WrSuspended,
-  WrUserRequest,
-  WrEventPair,
-  WrQueue,
-  WrLpcReceive,
-  WrLpcReply,
-  WrVirtualMemory,
-  WrPageOut,
-  WrRendezvous,
-  Spare2,
-  Spare3,
-  Spare4,
-  Spare5,
-  Spare6,
-  WrKernel
-} KWAIT_REASON;
-
-typedef struct _SYSTEM_THREADS {
-  LARGE_INTEGER KernelTime;
-  LARGE_INTEGER UserTime;
-  LARGE_INTEGER CreateTime;
-  ULONG WaitTime;
-  PVOID StartAddress;
-  CLIENT_ID ClientId;
-  KPRIORITY Priority;
-  KPRIORITY BasePriority;
-  ULONG ContextSwitchCount;
-  THREAD_STATE State;
-  KWAIT_REASON WaitReason;
-} SYSTEM_THREADS, *PSYSTEM_THREADS;
-
-typedef struct _SYSTEM_PROCESSES { // Information class 5
-  ULONG NextEntryDelta;
-  ULONG ThreadCount;
-  ULONG Reserved1[6];
-  LARGE_INTEGER CreateTime;
-  LARGE_INTEGER UserTime;
-  LARGE_INTEGER KernelTime;
-  UNICODE_STRING ProcessName;
-  KPRIORITY BasePriority;
-  ULONG ProcessId;
-  ULONG InheritedFromProcessId;
-  ULONG HandleCount;
-  ULONG Reserved2[2];
-  ULONG PrivatePageCount;
-  VM_COUNTERS VmCounters;
-  IO_COUNTERS IoCounters; // Windows 2000 only
-  SYSTEM_THREADS Threads[1];
-} SYSTEM_PROCESSES, *PSYSTEM_PROCESSES;
-
-typedef NTSTATUS NTAPI
-ZwQuerySystemInformationFunc(IN SYSTEM_INFORMATION_CLASS SystemInformationClass,
-                             IN OUT PVOID SystemInformation,
-                             IN ULONG SystemInformationLength,
-                             OUT PULONG ReturnLength OPTIONAL
-                             );
-
-typedef struct _DEBUG_BUFFER {
-  HANDLE SectionHandle;
-  PVOID  SectionBase;
-  PVOID  RemoteSectionBase;
-  ULONG  SectionBaseDelta;
-  HANDLE EventPairHandle;
-  ULONG  Unknown[2];
-  HANDLE RemoteThreadHandle;
-  ULONG  InfoClassMask;
-  ULONG  SizeOfInfo;
-  ULONG  AllocatedSize;
-  ULONG  SectionSize;
-  PVOID  ModuleInformation;
-  PVOID  BackTraceInformation;
-  PVOID  HeapInformation;
-  PVOID  LockInformation;
-  PVOID  Reserved[8];
-} DEBUG_BUFFER, *PDEBUG_BUFFER;
-
-typedef PDEBUG_BUFFER NTAPI
-RtlCreateQueryDebugBufferFunc(IN ULONG Size,
-                              IN BOOLEAN EventPair);
-
-#define PDI_MODULES     0x01 // The loaded modules of the process
-#define PDI_BACKTRACE   0x02 // The heap stack back traces
-#define PDI_HEAPS       0x04 // The heaps of the process
-#define PDI_HEAP_TAGS   0x08 // The heap tags
-#define PDI_HEAP_BLOCKS 0x10 // The heap blocks
-#define PDI_LOCKS       0x20 // The locks created by the process
-
-typedef struct _DEBUG_MODULE_INFORMATION { // c.f. SYSTEM_MODULE_INFORMATION
-  ULONG  Reserved[2];
-  ULONG  Base;
-  ULONG  Size;
-  ULONG  Flags;
-  USHORT Index;
-  USHORT Unknown;
-  USHORT LoadCount;
-  USHORT ModuleNameOffset;
-  CHAR   ImageName[256];
-} DEBUG_MODULE_INFORMATION, *PDEBUG_MODULE_INFORMATION;
-
-// Flags
-#define LDRP_STATIC_LINK             0x00000002
-#define LDRP_IMAGE_DLL               0x00000004
-#define LDRP_LOAD_IN_PROGRESS        0x00001000
-#define LDRP_UNLOAD_IN_PROGRESS      0x00002000
-#define LDRP_ENTRY_PROCESSED         0x00004000
-#define LDRP_ENTRY_INSERTED          0x00008000
-#define LDRP_CURRENT_LOAD            0x00010000
-#define LDRP_FAILED_BUILTIN_LOAD     0x00020000
-#define LDRP_DONT_CALL_FOR_THREADS   0x00040000
-#define LDRP_PROCESS_ATTACH_CALLED   0x00080000
-#define LDRP_DEBUG_SYMBOLS_LOADED    0x00100000
-#define LDRP_IMAGE_NOT_AT_BASE       0x00200000
-#define LDRP_WX86_IGNORE_MACHINETYPE 0x00400000
-
-// NOTE that this will require creating a thread in the target
-// process, implying that we can not call this while the process is
-// suspended. May have to run this command in the child processes
-// rather than the server.
-
-typedef NTSTATUS NTAPI
-RtlQueryProcessDebugInformationFunc(IN ULONG ProcessId,
-                                    IN ULONG DebugInfoClassMask,
-                                    IN OUT PDEBUG_BUFFER DebugBuffer);
-
-typedef NTSTATUS NTAPI
-RtlDestroyQueryDebugBufferFunc(IN PDEBUG_BUFFER DebugBuffer);
-
-// Routines to load and unload NTDLL.DLL.
-HMODULE loadNTDLL();
-// Safe to call even if has not been loaded
-void    unloadNTDLL();
-
-} // extern "C"
-} // namespace NT4
-
-//----------------------------------------------------------------------
-
-// On NT 4 only, we now use PSAPI to enumerate the loaded modules in
-// the target processes. RtlQueryProcessDebugInformation creates a
-// thread in the target process, which causes problems when we are
-// handling events like breakpoints in the debugger. The dependence on
-// an external DLL which might not be present is unfortunate, but we
-// can either redistribute this DLL (if allowed) or refuse to start on
-// NT 4 if it is not present.
-
-typedef struct _MODULEINFO {
-    LPVOID lpBaseOfDll;
-    DWORD SizeOfImage;
-    LPVOID EntryPoint;
-} MODULEINFO, *LPMODULEINFO;
-
-typedef BOOL (WINAPI EnumProcessModulesFunc)(HANDLE, HMODULE *, DWORD, LPDWORD);
-typedef DWORD (WINAPI GetModuleFileNameExFunc)(HANDLE, HMODULE, LPTSTR, DWORD);
-typedef BOOL (WINAPI GetModuleInformationFunc)(HANDLE, HMODULE, LPMODULEINFO, DWORD);
-// Routines to load and unload PSAPI.DLL.
-HMODULE loadPSAPIDLL();
-// Safe to call even if has not been loaded
-void    unloadPSAPIDLL();
-
-#endif // #defined _NT4INTERNALS_H_
--- a/agent/src/os/win32/ports.h	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef _PORTS_H_
-#define _PORTS_H_
-
-// This is the "public" port which end-user clients can connect to
-// with an arbitrary application, including telnet.
-const short CLIENT_PORT = 27000;
-
-#endif  // #defined _PORTS_H_
--- a/agent/src/os/win32/procList.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,190 +0,0 @@
-/*
- * Copyright (c) 2000, 2001, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "procList.hpp"
-#include "nt4internals.hpp"
-#include "isNT4.hpp"
-#include "toolHelp.hpp"
-#include <assert.h>
-
-using namespace std;
-using namespace NT4;
-
-typedef void ProcListImplFunc(ProcEntryList& processes);
-
-void procListImplNT4(ProcEntryList& processes);
-void procListImplToolHelp(ProcEntryList& processes);
-
-ProcEntry::ProcEntry(ULONG pid, USHORT nameLength, WCHAR* name) {
-  this->pid = pid;
-  this->nameLength = nameLength;
-  this->name = new WCHAR[nameLength];
-  memcpy(this->name, name, nameLength * sizeof(WCHAR));
-}
-
-ProcEntry::ProcEntry(ULONG pid, USHORT nameLength, char* name) {
-  this->pid = pid;
-  this->nameLength = nameLength;
-  this->name = new WCHAR[nameLength];
-  int j = 0;
-  for (int i = 0; i < nameLength; i++) {
-    // FIXME: what is the proper promotion from ASCII to UNICODE?
-    this->name[i] = name[i] & 0xFF;
-  }
-}
-
-ProcEntry::ProcEntry(const ProcEntry& arg) {
-  name = NULL;
-  copyFrom(arg);
-}
-
-ProcEntry&
-ProcEntry::operator=(const ProcEntry& arg) {
-  copyFrom(arg);
-  return *this;
-}
-
-ProcEntry::~ProcEntry() {
-  delete[] name;
-}
-
-void
-ProcEntry::copyFrom(const ProcEntry& arg) {
-  if (name != NULL) {
-    delete[] name;
-  }
-  pid = arg.pid;
-  nameLength = arg.nameLength;
-  name = new WCHAR[nameLength];
-  memcpy(name, arg.name, nameLength * sizeof(WCHAR));
-}
-
-ULONG
-ProcEntry::getPid() {
-  return pid;
-}
-
-USHORT
-ProcEntry::getNameLength() {
-  return nameLength;
-}
-
-WCHAR*
-ProcEntry::getName() {
-  return name;
-}
-
-void
-procList(ProcEntryList& processes) {
-  static ProcListImplFunc* impl = NULL;
-
-  if (impl == NULL) {
-    // See which operating system we're on
-    impl = (isNT4() ? &procListImplNT4 : &procListImplToolHelp);
-  }
-
-  assert(impl != NULL);
-
-  (*impl)(processes);
-}
-
-void
-procListImplNT4(ProcEntryList& processes) {
-  using namespace NT4;
-
-  static ZwQuerySystemInformationFunc* query = NULL;
-
-  if (query == NULL) {
-    HMODULE ntDLL = loadNTDLL();
-    query =
-      (ZwQuerySystemInformationFunc*) GetProcAddress(ntDLL,
-                                                     "ZwQuerySystemInformation");
-    assert(query != NULL);
-  }
-
-  ULONG n = 0x100;
-  PSYSTEM_PROCESSES sp = new SYSTEM_PROCESSES[n];
-  while ((*query)(SystemProcessesAndThreadsInformation,
-                  sp, n * sizeof(SYSTEM_PROCESSES), 0) == STATUS_INFO_LENGTH_MISMATCH) {
-    delete[] sp;
-    n *= 2;
-    sp = new SYSTEM_PROCESSES[n];
-  }
-
-  bool done = false;
-  for (PSYSTEM_PROCESSES p = sp; !done;
-       p = PSYSTEM_PROCESSES(PCHAR(p) + p->NextEntryDelta)) {
-    processes.push_back(ProcEntry(p->ProcessId,
-                                  p->ProcessName.Length / 2,
-                                  p->ProcessName.Buffer));
-    done = p->NextEntryDelta == 0;
-  }
-}
-
-void
-procListImplToolHelp(ProcEntryList& processes) {
-  using namespace ToolHelp;
-
-  static CreateToolhelp32SnapshotFunc* snapshotFunc = NULL;
-  static Process32FirstFunc*           firstFunc    = NULL;
-  static Process32NextFunc*            nextFunc     = NULL;
-
-  if (snapshotFunc == NULL) {
-    HMODULE dll = loadDLL();
-
-    snapshotFunc =
-      (CreateToolhelp32SnapshotFunc*) GetProcAddress(dll,
-                                                     "CreateToolhelp32Snapshot");
-
-    firstFunc = (Process32FirstFunc*) GetProcAddress(dll,
-                                                     "Process32First");
-
-    nextFunc = (Process32NextFunc*) GetProcAddress(dll,
-                                                   "Process32Next");
-
-    assert(snapshotFunc != NULL);
-    assert(firstFunc    != NULL);
-    assert(nextFunc     != NULL);
-  }
-
-  HANDLE snapshot = (*snapshotFunc)(TH32CS_SNAPPROCESS, 0 /* ignored */);
-  if (snapshot == (HANDLE) -1) {
-    // Error occurred during snapshot
-    return;
-  }
-
-  // Iterate
-  PROCESSENTRY32 proc;
-  if ((*firstFunc)(snapshot, &proc)) {
-    do {
-      // FIXME: could make this uniform to the NT version by cutting
-      // off the path name just before the executable name
-      processes.push_back(ProcEntry(proc.th32ProcessID,
-                                    strlen(proc.szExeFile),
-                                    proc.szExeFile));
-    } while ((*nextFunc)(snapshot, &proc));
-  }
-
-  CloseHandle(snapshot);
-}
--- a/agent/src/os/win32/procList.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2000, 2001, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef _PROCLIST_
-#define _PROCLIST_
-
-#include <windows.h>
-#include <vector>
-
-class ProcEntry {
-public:
-  /** name may not be NULL */
-  ProcEntry(ULONG pid, USHORT nameLength, wchar_t* name);
-  ProcEntry(ULONG pid, USHORT nameLength, char* name);
-  ~ProcEntry();
-  ProcEntry(const ProcEntry& arg);
-  ProcEntry& operator=(const ProcEntry& arg);
-
-  ULONG getPid();
-  /** Returns number of WCHAR characters in getName() */
-  USHORT getNameLength();
-  WCHAR* getName();
-
-private:
-  ULONG pid;
-  USHORT nameLength;
-  WCHAR* name;
-  void copyFrom(const ProcEntry& arg);
-};
-
-typedef std::vector<ProcEntry> ProcEntryList;
-void procList(ProcEntryList& processes);
-
-#endif  // #defined _PROCLIST_
--- a/agent/src/os/win32/serverLists.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,270 +0,0 @@
-/*
- * Copyright (c) 2000, 2003, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include <assert.h>
-#include "serverLists.hpp"
-
-//----------------------------------------------------------------------
-// Lists
-//
-
-CRITICAL_SECTION Lists::crit;
-
-void
-Lists::init() {
-  InitializeCriticalSection(&crit);
-}
-
-void
-Lists::lock() {
-  EnterCriticalSection(&crit);
-}
-
-void
-Lists::unlock() {
-  LeaveCriticalSection(&crit);
-}
-
-//----------------------------------------------------------------------
-// ListsLocker
-//
-
-ListsLocker::ListsLocker() {
-  Lists::lock();
-}
-
-ListsLocker::~ListsLocker() {
-  Lists::unlock();
-}
-
-//----------------------------------------------------------------------
-// ChildInfo
-//
-
-ChildInfo::ChildInfo(DWORD pid, HANDLE childProcessHandle,
-                     HANDLE writeToStdinHandle, HANDLE readFromStdoutHandle,
-                     HANDLE auxHandle1, HANDLE auxHandle2) {
-  this->pid = pid;
-  this->childProcessHandle = childProcessHandle;
-  this->writeToStdinHandle = writeToStdinHandle;
-  this->readFromStdoutHandle = readFromStdoutHandle;
-  this->auxHandle1 = auxHandle1;
-  this->auxHandle2 = auxHandle2;
-  client = NULL;
-}
-
-DWORD
-ChildInfo::getPid() {
-  return pid;
-}
-
-HANDLE
-ChildInfo::getChildProcessHandle() {
-  return childProcessHandle;
-}
-
-HANDLE
-ChildInfo::getWriteToStdinHandle() {
-  return writeToStdinHandle;
-}
-
-HANDLE
-ChildInfo::getReadFromStdoutHandle() {
-  return readFromStdoutHandle;
-}
-
-void
-ChildInfo::setClient(ClientInfo* clientInfo) {
-  client = clientInfo;
-}
-
-ClientInfo*
-ChildInfo::getClient() {
-  return client;
-}
-
-void
-ChildInfo::closeAll() {
-  CloseHandle(childProcessHandle);
-  CloseHandle(writeToStdinHandle);
-  CloseHandle(readFromStdoutHandle);
-  CloseHandle(auxHandle1);
-  CloseHandle(auxHandle2);
-}
-
-//----------------------------------------------------------------------
-// ChildList
-//
-
-ChildList::ChildList() {
-}
-
-ChildList::~ChildList() {
-}
-
-void
-ChildList::addChild(ChildInfo* info) {
-  // Could store these in binary sorted order by pid for efficiency
-  childList.push_back(info);
-}
-
-ChildInfo*
-ChildList::removeChild(HANDLE childProcessHandle) {
-  for (ChildInfoList::iterator iter = childList.begin(); iter != childList.end();
-       iter++) {
-    ChildInfo* info = *iter;
-    if (info->getChildProcessHandle() == childProcessHandle) {
-      childList.erase(iter);
-      return info;
-    }
-  }
-  assert(false);
-  return NULL;
-}
-
-void
-ChildList::removeChild(ChildInfo* info) {
-  for (ChildInfoList::iterator iter = childList.begin(); iter != childList.end();
-       iter++) {
-    if (*iter == info) {
-      childList.erase(iter);
-      return;
-    }
-  }
-  assert(false);
-}
-
-ChildInfo*
-ChildList::getChildByPid(DWORD pid) {
-  for (ChildInfoList::iterator iter = childList.begin(); iter != childList.end();
-       iter++) {
-    ChildInfo* info = *iter;
-    if (info->getPid() == pid) {
-      return info;
-    }
-  }
-  return NULL;
-}
-
-int
-ChildList::size() {
-  return childList.size();
-}
-
-ChildInfo*
-ChildList::getChildByIndex(int index) {
-  return childList[index];
-}
-
-//----------------------------------------------------------------------
-// ClientInfo
-//
-
-ClientInfo::ClientInfo(SOCKET dataSocket) {
-  this->dataSocket = dataSocket;
-  buf = new IOBuf(32768, 131072);
-  buf->setSocket(dataSocket);
-  target = NULL;
-}
-
-ClientInfo::~ClientInfo() {
-  delete buf;
-}
-
-SOCKET
-ClientInfo::getDataSocket() {
-  return dataSocket;
-}
-
-IOBuf*
-ClientInfo::getIOBuf() {
-  return buf;
-}
-
-void
-ClientInfo::setTarget(ChildInfo* childInfo) {
-  target = childInfo;
-}
-
-ChildInfo*
-ClientInfo::getTarget() {
-  return target;
-}
-
-void
-ClientInfo::closeAll() {
-  shutdown(dataSocket, SD_BOTH);
-  closesocket(dataSocket);
-  dataSocket = INVALID_SOCKET;
-}
-
-//----------------------------------------------------------------------
-// ClientList
-//
-
-ClientList::ClientList() {
-}
-
-ClientList::~ClientList() {
-}
-
-void
-ClientList::addClient(ClientInfo* info) {
-  clientList.push_back(info);
-}
-
-bool
-ClientList::isAnyDataSocketSet(fd_set* fds, ClientInfo** out) {
-  for (ClientInfoList::iterator iter = clientList.begin(); iter != clientList.end();
-       iter++) {
-    ClientInfo* info = *iter;
-    if (FD_ISSET(info->getDataSocket(), fds)) {
-      *out = info;
-      return true;
-    }
-  }
-  return false;
-}
-
-void
-ClientList::removeClient(ClientInfo* client) {
-  for (ClientInfoList::iterator iter = clientList.begin(); iter != clientList.end();
-       iter++) {
-    if (*iter == client) {
-      clientList.erase(iter);
-      return;
-    }
-  }
-  assert(false);
-}
-
-int
-ClientList::size() {
-  return clientList.size();
-}
-
-ClientInfo*
-ClientList::get(int num) {
-  return clientList[num];
-}
--- a/agent/src/os/win32/serverLists.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,204 +0,0 @@
-/*
- * Copyright (c) 2000, 2003, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef _SERVER_LISTS_
-#define _SERVER_LISTS_
-
-#include <vector>
-#include <winsock2.h>
-#include "IOBuf.hpp"
-
-//
-// NOTE:
-//
-// All of these lists are guarded by the global lock managed by the
-// Lists class. Lists::init() must be called at the start of the
-// program.
-//
-
-class Lists {
-  friend class ListsLocker;
-public:
-  static void init();
-private:
-  static void lock();
-  static void unlock();
-  static CRITICAL_SECTION crit;
-};
-
-// Should be allocated on stack. Ensures proper locking/unlocking
-// pairing.
-class ListsLocker {
-public:
-  ListsLocker();
-  ~ListsLocker();
-};
-
-// We must keep track of all of the child processes we have forked to
-// handle attaching to a target process. This is necessary because we
-// allow clients to detach from processes, but the child processes we
-// fork must necessarily stay alive for the duration of the target
-// application. A subsequent attach operation to the target process
-// results in the same child process being reused. For this reason,
-// child processes are known to be in one of two states: attached and
-// detached.
-
-class ClientInfo;
-
-class ChildInfo {
-public:
-  /** The pid of the ChildInfo indicates the process ID of the target
-      process which the subprocess was created to debug, not the pid
-      of the subprocess itself. */
-  ChildInfo(DWORD pid, HANDLE childProcessHandle,
-            HANDLE writeToStdinHandle, HANDLE readFromStdoutHandle,
-            HANDLE auxHandle1, HANDLE auxHandle2);
-
-  DWORD getPid();
-  HANDLE getChildProcessHandle();
-  HANDLE getWriteToStdinHandle();
-  HANDLE getReadFromStdoutHandle();
-
-  /** Set the client which is currently attached to the target process
-      via this child process. Set this to NULL to indicate that the
-      child process is ready to accept another attachment. */
-  void setClient(ClientInfo* clientInfo);
-
-  ClientInfo* getClient();
-
-  /** This is NOT automatically called in the destructor */
-  void closeAll();
-
-private:
-  DWORD pid;
-  HANDLE childProcessHandle;
-  HANDLE writeToStdinHandle;
-  HANDLE readFromStdoutHandle;
-  HANDLE auxHandle1;
-  HANDLE auxHandle2;
-  ClientInfo* client;
-};
-
-// We keep track of a list of child debugger processes, each of which
-// is responsible for debugging a certain target process. These
-// debugger processes can serve multiple clients during their
-// lifetime. When a client detaches from a given process or tells the
-// debugger to "exit", the debug server is notified that the child
-// process is once again available to accept connections from clients.
-
-class ChildList {
-private:
-  typedef std::vector<ChildInfo*> ChildInfoList;
-
-public:
-  ChildList();
-  ~ChildList();
-
-  void addChild(ChildInfo*);
-
-  /** Removes and returns the ChildInfo* associated with the given
-      child process handle. */
-  ChildInfo* removeChild(HANDLE childProcessHandle);
-
-  /** Removes the given ChildInfo. */
-  void removeChild(ChildInfo* info);
-
-  /** Return the ChildInfo* associated with a given process ID without
-      removing it from the list. */
-  ChildInfo* getChildByPid(DWORD pid);
-
-  /** Iteration support */
-  int size();
-
-  /** Iteration support */
-  ChildInfo* getChildByIndex(int index);
-
-private:
-  ChildInfoList childList;
-};
-
-// We also keep a list of clients whose requests we are responsible
-// for serving. Clients can attach and detach from child processes.
-
-class ClientInfo {
-public:
-  ClientInfo(SOCKET dataSocket);
-  ~ClientInfo();
-
-  SOCKET getDataSocket();
-  /** Gets an IOBuf configured for the data socket, which should be
-      used for all communication with the client. */
-  IOBuf* getIOBuf();
-
-  /** Set the information for the process to which this client is
-      attached. Set this to NULL to indicate that the client is not
-      currently attached to any target process. */
-  void setTarget(ChildInfo* childInfo);
-
-  /** Get the information for the process to which this client is
-      currently attached, or NULL if none. */
-  ChildInfo* getTarget();
-
-  /** Close down the socket connection to this client. This is NOT
-      automatically called by the destructor. */
-  void closeAll();
-
-private:
-  SOCKET dataSocket;
-  IOBuf* buf;
-  ChildInfo* target;
-};
-
-class ClientList {
-private:
-  typedef std::vector<ClientInfo*> ClientInfoList;
-
-public:
-  ClientList();
-  ~ClientList();
-
-  /** Adds a client to the list. */
-  void addClient(ClientInfo* info);
-
-  /** Check to see whether the parent socket of any of the ClientInfo
-      objects is readable in the given fd_set. If so, returns TRUE and
-      sets the given ClientInfo* (a non-NULL pointer to which must be
-      given) appropriately. */
-  bool isAnyDataSocketSet(fd_set* fds, ClientInfo** info);
-
-  /** Removes a client from the list. User is responsible for deleting
-      the ClientInfo* using operator delete. */
-  void removeClient(ClientInfo* client);
-
-  /** Iteration support. */
-  int size();
-
-  /** Iteration support. */
-  ClientInfo* get(int num);
-
-private:
-  ClientInfoList clientList;
-};
-
-#endif  // #defined _SERVER_LISTS_
--- a/agent/src/os/win32/toolHelp.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2001, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "toolHelp.hpp"
-#include <assert.h>
-
-namespace ToolHelp {
-
-static HMODULE kernelDLL = NULL;
-
-HMODULE loadDLL() {
-  if (kernelDLL == NULL) {
-    kernelDLL = LoadLibrary("KERNEL32.DLL");
-  }
-
-  assert(kernelDLL != NULL);
-  return kernelDLL;
-}
-
-void unloadDLL() {
-  if (kernelDLL != NULL) {
-    FreeLibrary(kernelDLL);
-    kernelDLL = NULL;
-  }
-}
-
-} // namespace ToolHelp
--- a/agent/src/os/win32/toolHelp.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2001, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef _TOOLHELP_H_
-#define _TOOLHELP_H_
-
-#include <windows.h>
-#include <tlhelp32.h>
-
-namespace ToolHelp {
-extern "C" {
-
-  ///////////////
-  // Snapshots //
-  ///////////////
-  typedef HANDLE WINAPI
-  CreateToolhelp32SnapshotFunc(DWORD dwFlags, DWORD th32ProcessID);
-
-  //////////////////
-  // Process List //
-  //////////////////
-  typedef BOOL WINAPI Process32FirstFunc(HANDLE hSnapshot,
-                                         LPPROCESSENTRY32 lppe);
-
-  typedef BOOL WINAPI Process32NextFunc(HANDLE hSnapshot,
-                                        LPPROCESSENTRY32 lppe);
-
-  // NOTE: although these routines are defined in TLHELP32.H, they
-  // seem to always return false (maybe only under US locales)
-  typedef BOOL WINAPI Process32FirstWFunc(HANDLE hSnapshot,
-                                          LPPROCESSENTRY32W lppe);
-
-  typedef BOOL WINAPI Process32NextWFunc(HANDLE hSnapshot,
-                                         LPPROCESSENTRY32W lppe);
-
-  /////////////////
-  // Module List //
-  /////////////////
-  typedef BOOL WINAPI
-  Module32FirstFunc(HANDLE hSnapshot, LPMODULEENTRY32 lpme);
-
-  typedef BOOL WINAPI
-  Module32NextFunc (HANDLE hSnapshot, LPMODULEENTRY32 lpme);
-
-
-  // Routines to load and unload KERNEL32.DLL.
-  HMODULE loadDLL();
-  // Safe to call even if has not been loaded
-  void    unloadDLL();
-
-} // extern "C"
-} // namespace "ToolHelp"
-
-#endif // #defined _TOOLHELP_H_
--- a/agent/src/share/classes/sun/jvm/hotspot/CLHSDB.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/CLHSDB.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -170,6 +170,7 @@
             final String errMsg = formatMessage(e.getMessage(), 80);
             System.err.println("Unable to connect to process ID " + pid + ":\n\n" + errMsg);
             agent.detach();
+            e.printStackTrace();
             return;
         }
     }
@@ -191,6 +192,7 @@
             final String errMsg = formatMessage(e.getMessage(), 80);
             System.err.println("Unable to open core file\n" + corePath + ":\n\n" + errMsg);
             agent.detach();
+            e.printStackTrace();
             return;
         }
     }
@@ -209,6 +211,7 @@
             final String errMsg = formatMessage(e.getMessage(), 80);
             System.err.println("Unable to connect to machine \"" + remoteMachineName + "\":\n\n" + errMsg);
             agent.detach();
+            e.printStackTrace();
             return;
         }
     }
--- a/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Thu Dec 22 15:46:11 2011 +0000
@@ -40,6 +40,8 @@
 import sun.jvm.hotspot.interpreter.*;
 import sun.jvm.hotspot.memory.*;
 import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.opto.*;
+import sun.jvm.hotspot.ci.*;
 import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.utilities.*;
 import sun.jvm.hotspot.utilities.soql.*;
@@ -48,6 +50,8 @@
 import sun.jvm.hotspot.tools.*;
 import sun.jvm.hotspot.tools.ObjectHistogram;
 import sun.jvm.hotspot.tools.StackTrace;
+import sun.jvm.hotspot.tools.jcore.ClassDump;
+import sun.jvm.hotspot.tools.jcore.ClassFilter;
 
 public class CommandProcessor {
     public abstract static class DebuggerInterface {
@@ -59,6 +63,27 @@
         public abstract void reattach();
     }
 
+    public static class BootFilter implements ClassFilter {
+        public boolean canInclude(InstanceKlass kls) {
+            return kls.getClassLoader() == null;
+        }
+    }
+
+    public static class NonBootFilter implements ClassFilter {
+        private HashMap emitted = new HashMap();
+        public boolean canInclude(InstanceKlass kls) {
+            if (kls.getClassLoader() == null) return false;
+            if (emitted.get(kls.getName()) != null) {
+                // Since multiple class loaders are being shoved
+                // together duplicate classes are a possibilty.  For
+                // now just ignore them.
+                return false;
+            }
+            emitted.put(kls.getName(), kls);
+            return true;
+        }
+    }
+
     static class Tokens {
         final String input;
         int i;
@@ -258,9 +283,14 @@
     }
 
     void dumpFields(Type type) {
+        dumpFields(type, true);
+    }
+
+    void dumpFields(Type type, boolean allowStatic) {
         Iterator i = type.getFields();
         while (i.hasNext()) {
             Field f = (Field) i.next();
+            if (!allowStatic && f.isStatic()) continue;
             out.print("field ");
             quote(type.getName());
             out.print(" ");
@@ -458,13 +488,18 @@
                     });
             }
         },
-        new Command("flags", "flags [ flag ]", false) {
+        new Command("flags", "flags [ flag | -nd ]", false) {
             public void doit(Tokens t) {
                 int tokens = t.countTokens();
                 if (tokens != 0 && tokens != 1) {
                     usage();
                 } else {
                     String name = tokens > 0 ? t.nextToken() : null;
+                    boolean nonDefault = false;
+                    if (name != null && name.equals("-nd")) {
+                        name = null;
+                        nonDefault = true;
+                    }
 
                     VM.Flag[] flags = VM.getVM().getCommandLineFlags();
                     if (flags == null) {
@@ -474,7 +509,12 @@
                         for (int f = 0; f < flags.length; f++) {
                             VM.Flag flag = flags[f];
                             if (name == null || flag.getName().equals(name)) {
-                                out.println(flag.getName() + " = " + flag.getValue());
+
+                                if (nonDefault && flag.getOrigin() == 0) {
+                                    // only print flags which aren't at their default values
+                                    continue;
+                                }
+                                out.println(flag.getName() + " = " + flag.getValue() + " " + flag.getOrigin());
                                 printed = true;
                             }
                         }
@@ -586,6 +626,158 @@
                 }
             }
         },
+        new Command("printmdo", "printmdo [ -a | expression ]", false) {
+            // Print every MDO in the heap or the one referenced by expression.
+            public void doit(Tokens t) {
+                if (t.countTokens() != 1) {
+                    usage();
+                } else {
+                    String s = t.nextToken();
+                    if (s.equals("-a")) {
+                        HeapVisitor iterator = new DefaultHeapVisitor() {
+                                public boolean doObj(Oop obj) {
+                                    if (obj instanceof MethodData) {
+                                        Method m = ((MethodData)obj).getMethod();
+                                        out.println("MethodData " + obj.getHandle() + " for " +
+                                                    "method " + m.getMethodHolder().getName().asString() + "." +
+                                                    m.getName().asString() +
+                                                    m.getSignature().asString() + "@" + m.getHandle());
+                                        ((MethodData)obj).printDataOn(out);
+                                    }
+                                    return false;
+                                }
+                            };
+                        VM.getVM().getObjectHeap().iteratePerm(iterator);
+                    } else {
+                        Address a = VM.getVM().getDebugger().parseAddress(s);
+                        OopHandle handle = a.addOffsetToAsOopHandle(0);
+                        MethodData mdo = (MethodData)VM.getVM().getObjectHeap().newOop(handle);
+                        mdo.printDataOn(out);
+                    }
+                }
+            }
+        },
+        new Command("dumpideal", "dumpideal { -a | id }", false) {
+            // Do a full dump of the nodes reachable from the root in each compiler thread.
+            public void doit(Tokens t) {
+                if (t.countTokens() != 1) {
+                    usage();
+                } else {
+                    String name = t.nextToken();
+                    boolean all = name.equals("-a");
+                    Threads threads = VM.getVM().getThreads();
+                    for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
+                        ByteArrayOutputStream bos = new ByteArrayOutputStream();
+                        thread.printThreadIDOn(new PrintStream(bos));
+                        if (all || bos.toString().equals(name)) {
+                          if (thread instanceof CompilerThread) {
+                            CompilerThread ct = (CompilerThread)thread;
+                            out.println(ct);
+                            ciEnv env = ct.env();
+                            if (env != null) {
+                              Compile c = env.compilerData();
+                              c.root().dump(9999, out);
+                            } else {
+                              out.println("  not compiling");
+                            }
+                          }
+                        }
+                    }
+                }
+            }
+        },
+        new Command("dumpcfg", "dumpcfg { -a | id }", false) {
+            // Dump the PhaseCFG for every compiler thread that has one live.
+            public void doit(Tokens t) {
+                if (t.countTokens() != 1) {
+                    usage();
+                } else {
+                    String name = t.nextToken();
+                    boolean all = name.equals("-a");
+                    Threads threads = VM.getVM().getThreads();
+                    for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
+                        ByteArrayOutputStream bos = new ByteArrayOutputStream();
+                        thread.printThreadIDOn(new PrintStream(bos));
+                        if (all || bos.toString().equals(name)) {
+                          if (thread instanceof CompilerThread) {
+                            CompilerThread ct = (CompilerThread)thread;
+                            out.println(ct);
+                            ciEnv env = ct.env();
+                            if (env != null) {
+                              Compile c = env.compilerData();
+                              c.cfg().dump(out);
+                            }
+                          }
+                        }
+                    }
+                }
+            }
+        },
+        new Command("dumpilt", "dumpilt { -a | id }", false) {
+            // Dump the InlineTree of a C2 compile.
+            public void doit(Tokens t) {
+                if (t.countTokens() != 1) {
+                    usage();
+                } else {
+                    String name = t.nextToken();
+                    boolean all = name.equals("-a");
+                    Threads threads = VM.getVM().getThreads();
+                    for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
+                        ByteArrayOutputStream bos = new ByteArrayOutputStream();
+                        thread.printThreadIDOn(new PrintStream(bos));
+                        if (all || bos.toString().equals(name)) {
+                            if (thread instanceof CompilerThread) {
+                                CompilerThread ct = (CompilerThread)thread;
+                                ciEnv env = ct.env();
+                                if (env != null) {
+                                    Compile c = env.compilerData();
+                                    InlineTree ilt = c.ilt();
+                                    if (ilt != null) {
+                                        ilt.print(out);
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        },
+        new Command("vmstructsdump", "vmstructsdump", false) {
+            public void doit(Tokens t) {
+                if (t.countTokens() != 0) {
+                    usage();
+                    return;
+                }
+
+                // Dump a copy of the type database in a form that can
+                // be read back.
+                Iterator i = agent.getTypeDataBase().getTypes();
+                // Make sure the types are emitted in an order that can be read back in
+                HashSet emitted = new HashSet();
+                Stack pending = new Stack();
+                while (i.hasNext()) {
+                    Type n = (Type)i.next();
+                    if (emitted.contains(n.getName())) {
+                        continue;
+                    }
+
+                    while (n != null && !emitted.contains(n.getName())) {
+                        pending.push(n);
+                        n = n.getSuperclass();
+                    }
+                    while (!pending.empty()) {
+                        n = (Type)pending.pop();
+                        dumpType(n);
+                        emitted.add(n.getName());
+                    }
+                }
+                i = agent.getTypeDataBase().getTypes();
+                while (i.hasNext()) {
+                    dumpFields((Type)i.next(), false);
+                }
+            }
+        },
+
         new Command("inspect", "inspect expression", false) {
             public void doit(Tokens t) {
                 if (t.countTokens() != 1) {
@@ -760,6 +952,50 @@
                 }
             }
         },
+        new Command("intConstant", "intConstant [ name [ value ] ]", true) {
+            public void doit(Tokens t) {
+                if (t.countTokens() != 1 && t.countTokens() != 0 && t.countTokens() != 2) {
+                    usage();
+                    return;
+                }
+                HotSpotTypeDataBase db = (HotSpotTypeDataBase)agent.getTypeDataBase();
+                if (t.countTokens() == 1) {
+                    out.println("intConstant " + name + " " + db.lookupIntConstant(name));
+                } else if (t.countTokens() == 0) {
+                    Iterator i = db.getIntConstants();
+                    while (i.hasNext()) {
+                        String name = (String)i.next();
+                        out.println("intConstant " + name + " " + db.lookupIntConstant(name));
+                    }
+                } else if (t.countTokens() == 2) {
+                    String name = t.nextToken();
+                    Integer value = Integer.valueOf(t.nextToken());
+                    db.addIntConstant(name, value);
+                }
+            }
+        },
+        new Command("longConstant", "longConstant [ name [ value ] ]", true) {
+            public void doit(Tokens t) {
+                if (t.countTokens() != 1 && t.countTokens() != 0 && t.countTokens() != 2) {
+                    usage();
+                    return;
+                }
+                HotSpotTypeDataBase db = (HotSpotTypeDataBase)agent.getTypeDataBase();
+                if (t.countTokens() == 1) {
+                    String name = t.nextToken();
+                    out.println("longConstant " + name + " " + db.lookupLongConstant(name));
+                } else if (t.countTokens() == 0) {
+                    Iterator i = db.getLongConstants();
+                    while (i.hasNext()) {
+                        String name = (String)i.next();
+                        out.println("longConstant " + name + " " + db.lookupLongConstant(name));
+                    }
+                } else if (t.countTokens() == 2) {
+                    String name = t.nextToken();
+                    Long value = Long.valueOf(t.nextToken());
+                    db.addLongConstant(name, value);
+                }
+            }
+        },
         new Command("field", "field [ type [ name fieldtype isStatic offset address ] ]", true) {
             public void doit(Tokens t) {
                 if (t.countTokens() != 1 && t.countTokens() != 0 && t.countTokens() != 6) {
@@ -1311,13 +1547,13 @@
                 return;
             }
 
-            executeCommand(ln);
+            executeCommand(ln, prompt);
         }
     }
 
     static Pattern historyPattern = Pattern.compile("((!\\*)|(!\\$)|(!!-?)|(!-?[0-9][0-9]*)|(![a-zA-Z][^ ]*))");
 
-    public void executeCommand(String ln) {
+    public void executeCommand(String ln, boolean putInHistory) {
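+        // putInHistory is the caller's "prompt" flag, so only commands typed at
+        // an interactive prompt are recorded in the history list.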
         if (ln.indexOf('!') != -1) {
             int size = history.size();
             if (size == 0) {
@@ -1406,7 +1642,7 @@
         Tokens t = new Tokens(ln);
         if (t.hasMoreTokens()) {
             boolean error = false;
-            history.add(ln);
+            if (putInHistory) history.add(ln);
             int len = t.countTokens();
             if (len > 2) {
                 String r = t.at(len - 2);
--- a/agent/src/share/classes/sun/jvm/hotspot/DebugServer.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/DebugServer.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 package sun.jvm.hotspot;
 
 import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.dbx.*;
 import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.oops.*;
 
--- a/agent/src/share/classes/sun/jvm/hotspot/HSDB.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/HSDB.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1740,7 +1740,7 @@
       else if (f.isCompiledFrame())    { tty.print("compiled"); }
       else if (f.isEntryFrame())       { tty.print("entry"); }
       else if (f.isNativeFrame())      { tty.print("native"); }
-      else if (f.isGlueFrame())        { tty.print("glue"); }
+      else if (f.isRuntimeFrame())     { tty.print("runtime"); }
       else { tty.print("external"); }
       tty.print(" frame with PC = " + f.getPC() + ", SP = " + f.getSP() + ", FP = " + f.getFP());
       if (f.isSignalHandlerFrameDbg()) {
--- a/agent/src/share/classes/sun/jvm/hotspot/HotSpotAgent.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/HotSpotAgent.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,10 +28,8 @@
 import java.net.*;
 import java.rmi.*;
 import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.dbx.*;
 import sun.jvm.hotspot.debugger.proc.*;
 import sun.jvm.hotspot.debugger.remote.*;
-import sun.jvm.hotspot.debugger.win32.*;
 import sun.jvm.hotspot.debugger.windbg.*;
 import sun.jvm.hotspot.debugger.linux.*;
 import sun.jvm.hotspot.memory.*;
@@ -436,113 +434,35 @@
 
     private void setupDebuggerSolaris() {
         setupJVMLibNamesSolaris();
-        if(System.getProperty("sun.jvm.hotspot.debugger.useProcDebugger") != null) {
-            ProcDebuggerLocal dbg = new ProcDebuggerLocal(null, true);
-            debugger = dbg;
-            attachDebugger();
-
-            // Set up CPU-dependent stuff
-            if (cpu.equals("x86")) {
-                machDesc = new MachineDescriptionIntelX86();
-            } else if (cpu.equals("sparc")) {
-                int addressSize = dbg.getRemoteProcessAddressSize();
-                if (addressSize == -1) {
-                    throw new DebuggerException("Error occurred while trying to determine the remote process's " +
-                    "address size");
-                }
+        ProcDebuggerLocal dbg = new ProcDebuggerLocal(null, true);
+        debugger = dbg;
+        attachDebugger();
 
-                if (addressSize == 32) {
-                    machDesc = new MachineDescriptionSPARC32Bit();
-                } else if (addressSize == 64) {
-                    machDesc = new MachineDescriptionSPARC64Bit();
-                } else {
-                    throw new DebuggerException("Address size " + addressSize + " is not supported on SPARC");
-                }
-            } else if (cpu.equals("amd64")) {
-                machDesc = new MachineDescriptionAMD64();
-            } else {
-                throw new DebuggerException("Solaris only supported on sparc/sparcv9/x86/amd64");
-            }
-
-            dbg.setMachineDescription(machDesc);
-            return;
-
-        } else {
-            String dbxPathName;
-            String dbxPathPrefix;
-            String dbxSvcAgentDSOPathName;
-            String dbxSvcAgentDSOPathPrefix;
-            String[] dbxSvcAgentDSOPathNames = null;
-
-            // use path names/prefixes specified on command
-            dbxPathName = System.getProperty("dbxPathName");
-            if (dbxPathName == null) {
-                dbxPathPrefix = System.getProperty("dbxPathPrefix");
-                if (dbxPathPrefix == null) {
-                    dbxPathPrefix = defaultDbxPathPrefix;
-                }
-                dbxPathName = dbxPathPrefix + fileSep + os + fileSep + cpu + fileSep + "bin" + fileSep + "dbx";
+        // Set up CPU-dependent stuff
+        if (cpu.equals("x86")) {
+            machDesc = new MachineDescriptionIntelX86();
+        } else if (cpu.equals("sparc")) {
+            int addressSize = dbg.getRemoteProcessAddressSize();
+            if (addressSize == -1) {
+                throw new DebuggerException("Error occurred while trying to determine the remote process's " +
+                                            "address size");
             }
 
-            dbxSvcAgentDSOPathName = System.getProperty("dbxSvcAgentDSOPathName");
-            if (dbxSvcAgentDSOPathName != null) {
-                dbxSvcAgentDSOPathNames = new String[] { dbxSvcAgentDSOPathName } ;
+            if (addressSize == 32) {
+                machDesc = new MachineDescriptionSPARC32Bit();
+            } else if (addressSize == 64) {
+                machDesc = new MachineDescriptionSPARC64Bit();
             } else {
-                dbxSvcAgentDSOPathPrefix = System.getProperty("dbxSvcAgentDSOPathPrefix");
-                if (dbxSvcAgentDSOPathPrefix == null) {
-                    dbxSvcAgentDSOPathPrefix = defaultDbxSvcAgentDSOPathPrefix;
-                }
-                if (cpu.equals("sparc")) {
-                    dbxSvcAgentDSOPathNames = new String[] {
-                        // FIXME: bad hack for SPARC v9. This is necessary because
-                        // there are two dbx executables on SPARC, one for v8 and one
-                        // for v9, and it isn't obvious how to tell the two apart
-                        // using the dbx command line. See
-                        // DbxDebuggerLocal.importDbxModule().
-                        dbxSvcAgentDSOPathPrefix + fileSep + os + fileSep + cpu + "v9" + fileSep + "lib" +
-                        fileSep + "libsvc_agent_dbx.so",
-                        dbxSvcAgentDSOPathPrefix + fileSep + os + fileSep + cpu + fileSep + "lib" +
-                        fileSep + "libsvc_agent_dbx.so",
-                    };
-                } else {
-                    dbxSvcAgentDSOPathNames = new String[] {
-                        dbxSvcAgentDSOPathPrefix + fileSep + os + fileSep + cpu + fileSep + "lib" +
-                        fileSep + "libsvc_agent_dbx.so"
-                    };
-                }
+                throw new DebuggerException("Address size " + addressSize + " is not supported on SPARC");
             }
+        } else if (cpu.equals("amd64")) {
+            machDesc = new MachineDescriptionAMD64();
+        } else {
+            throw new DebuggerException("Solaris only supported on sparc/sparcv9/x86/amd64");
+        }
 
-            // Note we do not use a cache for the local debugger in server
-            // mode; it's taken care of on the client side
-            DbxDebuggerLocal dbg = new DbxDebuggerLocal(null, dbxPathName, dbxSvcAgentDSOPathNames, !isServer);
-            debugger = dbg;
-
-            attachDebugger();
-
-            // Set up CPU-dependent stuff
-            if (cpu.equals("x86")) {
-                machDesc = new MachineDescriptionIntelX86();
-            } else if (cpu.equals("sparc")) {
-                int addressSize = dbg.getRemoteProcessAddressSize();
-                if (addressSize == -1) {
-                    throw new DebuggerException("Error occurred while trying to determine the remote process's " +
-                    "address size. It's possible that the Serviceability Agent's dbx module failed to " +
-                    "initialize. Examine the standard output and standard error streams from the dbx " +
-                    "process for more information.");
-                }
-
-                if (addressSize == 32) {
-                    machDesc = new MachineDescriptionSPARC32Bit();
-                } else if (addressSize == 64) {
-                    machDesc = new MachineDescriptionSPARC64Bit();
-                } else {
-                    throw new DebuggerException("Address size " + addressSize + " is not supported on SPARC");
-                }
-            }
-
-            dbg.setMachineDescription(machDesc);
-
-        }
+        dbg.setMachineDescription(machDesc);
+        return;
     }
 
     private void connectRemoteDebugger() throws DebuggerException {
@@ -589,11 +509,7 @@
         // mode; it will be taken care of on the client side (once remote
         // debugging is implemented).
 
-        if (System.getProperty("sun.jvm.hotspot.debugger.useWindbgDebugger") != null) {
-            debugger = new WindbgDebuggerLocal(machDesc, !isServer);
-        } else {
-            debugger = new Win32DebuggerLocal(machDesc, !isServer);
-        }
+        debugger = new WindbgDebuggerLocal(machDesc, !isServer);
 
         attachDebugger();
 
--- a/agent/src/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java	Thu Dec 22 15:46:11 2011 +0000
@@ -87,6 +87,7 @@
     readVMStructs();
     readVMIntConstants();
     readVMLongConstants();
+    readExternalDefinitions();
   }
 
   public Type lookupType(String cTypeName, boolean throwException) {
@@ -98,9 +99,9 @@
         fieldType = (BasicType)lookupType(cTypeName.substring(0, cTypeName.length() - 6), false);
     }
     if (fieldType == null) {
-      if (cTypeName.startsWith("GrowableArray<") && cTypeName.endsWith(">*")) {
+      if (cTypeName.startsWith("GrowableArray<") && cTypeName.endsWith(">")) {
         String ttype = cTypeName.substring("GrowableArray<".length(),
-                                            cTypeName.length() - 2);
+                                            cTypeName.length() - 1);
         Type templateType = lookupType(ttype, false);
         if (templateType == null && typeNameIsPointerType(ttype)) {
           templateType = recursiveCreateBasicPointerType(ttype);
@@ -108,7 +109,21 @@
         if (templateType == null) {
           lookupOrFail(ttype);
         }
-        fieldType = recursiveCreateBasicPointerType(cTypeName);
+
+        BasicType basicTargetType = createBasicType(cTypeName, false, false, false);
+
+        // transfer fields from GenericGrowableArray to template instance
+        BasicType generic = lookupOrFail("GenericGrowableArray");
+        BasicType specific = lookupOrFail("GrowableArray<int>");
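+        // All GrowableArray<T> instantiations have the same object size, so the
+        // GrowableArray<int> instance is used only as a representative here.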
+        basicTargetType.setSize(specific.getSize());
+        Iterator fields = generic.getFields();
+        while (fields.hasNext()) {
+          Field f = (Field)fields.next();
+          basicTargetType.addField(internalCreateField(basicTargetType, f.getName(),
+                                                       f.getType(), f.isStatic(),
+                                                       f.getOffset(), null));
+        }
+        fieldType = basicTargetType;
       }
     }
     if (fieldType == null && typeNameIsPointerType(cTypeName)) {
@@ -208,6 +223,156 @@
     return type;
   }
 
+  private void readExternalDefinitions() {
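+    // Optionally merge extra type and field definitions from the file named by
+    // the sun.jvm.hotspot.typedb property; the format matches the "type" and
+    // "field" lines emitted by the CLHSDB vmstructsdump command.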
+    String file = System.getProperty("sun.jvm.hotspot.typedb");
+    if (file != null) {
+      System.out.println("Reading " + file);
+      BufferedReader in = null;
+      try {
+        StreamTokenizer t = new StreamTokenizer(in = new BufferedReader(new InputStreamReader(new FileInputStream(file))));
+        t.resetSyntax();
+        t.wordChars('\u0000','\uFFFF');
+        t.whitespaceChars(' ', ' ');
+        t.whitespaceChars('\n', '\n');
+        t.whitespaceChars('\r', '\r');
+        t.quoteChar('\"');
+        t.eolIsSignificant(true);
+        while (t.nextToken() != StreamTokenizer.TT_EOF) {
+          if (t.ttype == StreamTokenizer.TT_EOL) {
+            continue;
+          }
+
+          if (t.sval.equals("field")) {
+            t.nextToken();
+            BasicType containingType = (BasicType)lookupType(t.sval);
+            t.nextToken();
+            String fieldName = t.sval;
+
+            // The field's Type must already be in the database -- no exceptions
+            t.nextToken();
+            Type fieldType = lookupType(t.sval);
+            t.nextToken();
+            boolean isStatic = Boolean.valueOf(t.sval).booleanValue();
+            t.nextToken();
+            long offset = Long.parseLong(t.sval);
+            t.nextToken();
+            Address staticAddress = null;
+            if (isStatic) {
+              throw new InternalError("static fields not supported");
+            }
+
+            // check to see if the field already exists
+            Iterator i = containingType.getFields();
+            boolean defined = false;
+            while (i.hasNext()) {
+              Field f = (Field) i.next();
+              if (f.getName().equals(fieldName)) {
+                if (f.isStatic() != isStatic) {
+                  throw new RuntimeException("static/nonstatic mismatch: " + fieldName);
+                }
+                if (!isStatic) {
+                  if (f.getOffset() != offset) {
+                    throw new RuntimeException("bad redefinition of field offset: " + fieldName);
+                  }
+                } else {
+                  if (!f.getStaticFieldAddress().equals(staticAddress)) {
+                    throw new RuntimeException("bad redefinition of field location: " + fieldName);
+                  }
+                }
+                if (f.getType() != fieldType) {
+                  System.out.println(fieldType);
+                  System.out.println(f.getType());
+                  throw new RuntimeException("bad redefinition of field type: " + fieldName);
+                }
+                defined = true;
+                break;
+              }
+            }
+
+            if (!defined) {
+              // Create field by type
+              createField(containingType,
+                          fieldName, fieldType,
+                          isStatic,
+                          offset,
+                          staticAddress);
+            }
+          } else if (t.sval.equals("type")) {
+            t.nextToken();
+            String typeName = t.sval;
+            t.nextToken();
+            String superclassName = t.sval;
+            if (superclassName.equals("null")) {
+              superclassName = null;
+            }
+            t.nextToken();
+            boolean isOop = Boolean.valueOf(t.sval).booleanValue();
+            t.nextToken();
+            boolean isInteger = Boolean.valueOf(t.sval).booleanValue();
+            t.nextToken();
+            boolean isUnsigned = Boolean.valueOf(t.sval).booleanValue();
+            t.nextToken();
+            long size = Long.parseLong(t.sval);
+
+            BasicType type = null;
+            try {
+              type = (BasicType)lookupType(typeName);
+            } catch (RuntimeException e) {
+            }
+            if (type != null) {
+              if (type.isOopType() != isOop) {
+                throw new RuntimeException("oop mismatch in type definition: " + typeName);
+              }
+              if (type.isCIntegerType() != isInteger) {
+                throw new RuntimeException("integer type mismatch in type definition: " + typeName);
+              }
+              if (type.isCIntegerType() && (((CIntegerType)type).isUnsigned()) != isUnsigned) {
+                throw new RuntimeException("unsigned mismatch in type definition: " + typeName);
+              }
+              if (type.getSuperclass() == null) {
+                if (superclassName != null) {
+                  if (type.getSize() == -1) {
+                    type.setSuperclass(lookupType(superclassName));
+                  } else {
+                    throw new RuntimeException("unexpected superclass in type definition: " + typeName);
+                  }
+                }
+              } else {
+                if (superclassName == null) {
+                  throw new RuntimeException("missing superclass in type definition: " + typeName);
+                }
+                if (!type.getSuperclass().getName().equals(superclassName)) {
+                  throw new RuntimeException("incorrect superclass in type definition: " + typeName);
+                }
+              }
+              if (type.getSize() != size) {
+                if (type.getSize() == -1 || type.getSize() == 0) {
+                  type.setSize(size);
+                } else {
+                  throw new RuntimeException("size mismatch in type definition: " + typeName + ": " + type.getSize() + " != " + size);
+                }
+              }
+            }
+
+            if (lookupType(typeName, false) == null) {
+              // Create type
+              createType(typeName, superclassName, isOop, isInteger, isUnsigned, size);
+            }
+          } else {
+            throw new InternalError("\"" + t.sval + "\"");
+          }
+        }
+      } catch (IOException ioe) {
+        ioe.printStackTrace();
+      } finally {
+        try {
+          in.close();
+        } catch (Exception e) {
+        }
+      }
+    }
+  }
+
   private void readVMStructs() {
     // Get the variables we need in order to traverse the VMStructEntry[]
     long structEntryTypeNameOffset;
@@ -504,20 +669,6 @@
           BasicType basicTargetType = createBasicType(targetTypeName, false, true, true);
           basicTargetType.setSize(1);
           targetType = basicTargetType;
-        } else if (targetTypeName.startsWith("GrowableArray<")) {
-          BasicType basicTargetType = createBasicType(targetTypeName, false, false, false);
-
-          // transfer fields from GenericGrowableArray to template instance
-          BasicType generic = lookupOrFail("GenericGrowableArray");
-          basicTargetType.setSize(generic.getSize());
-          Iterator fields = generic.getFields();
-          while (fields.hasNext()) {
-              Field f = (Field)fields.next();
-              basicTargetType.addField(internalCreateField(basicTargetType, f.getName(),
-                                                           f.getType(), f.isStatic(),
-                                                           f.getOffset(), null));
-          }
-          targetType = basicTargetType;
         } else {
           if (DEBUG) {
             System.err.println("WARNING: missing target type \"" + targetTypeName + "\" for pointer type \"" + typeName + "\"");
@@ -572,7 +723,7 @@
 
         // Classes are created with a size of UNINITIALIZED_SIZE.
         // Set size if necessary.
-        if (curType.getSize() == UNINITIALIZED_SIZE) {
+        if (curType.getSize() == UNINITIALIZED_SIZE || curType.getSize() == 0) {
             curType.setSize(size);
         } else {
             if (curType.getSize() != size) {
--- a/agent/src/share/classes/sun/jvm/hotspot/TestDebugger.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/TestDebugger.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,18 +25,12 @@
 package sun.jvm.hotspot;
 
 import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.dbx.*;
+import sun.jvm.hotspot.debugger.proc.*;
 
 // A test of the debugger backend. This should be used to connect to
 // the helloWorld.cpp program.
 
 public class TestDebugger {
-  // FIXME: make these configurable, i.e., via a dotfile
-  private static final String dbxPathName               = "/export/home/kbr/ws/dbx_61/dev/Derived-sparcv9-S2./src/dbx/dbx";
-  private static final String[] dbxSvcAgentDSOPathNames =
-    new String[] {
-      "/export/home/kbr/main/sa_baseline/src/os/solaris/agent/libsvc_agent_dbx.so"
-    };
 
   private static void usage() {
     System.out.println("usage: java TestDebugger [pid]");
@@ -58,8 +52,7 @@
         usage();
       }
 
-      JVMDebugger debugger = new DbxDebuggerLocal(new MachineDescriptionSPARC64Bit(),
-                                                  dbxPathName, dbxSvcAgentDSOPathNames, true);
+      JVMDebugger debugger = new ProcDebuggerLocal(null, true);
 
       try {
         debugger.attach(pid);
--- a/agent/src/share/classes/sun/jvm/hotspot/bugspot/BugSpot.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/bugspot/BugSpot.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.debugger.cdbg.*;
 import sun.jvm.hotspot.debugger.posix.*;
-import sun.jvm.hotspot.debugger.win32.*;
+import sun.jvm.hotspot.debugger.windbg.*;
 import sun.jvm.hotspot.livejvm.*;
 import sun.jvm.hotspot.memory.*;
 import sun.jvm.hotspot.oops.*;
@@ -604,7 +604,7 @@
           throw new DebuggerException("Unsupported CPU \"" + cpu + "\" for Windows");
         }
 
-        localDebugger = new Win32DebuggerLocal(new MachineDescriptionIntelX86(), true);
+        localDebugger = new WindbgDebuggerLocal(new MachineDescriptionIntelX86(), true);
       } else if (os.equals("linux")) {
         if (!cpu.equals("x86")) {
           throw new DebuggerException("Unsupported CPU \"" + cpu + "\" for Linux");
--- a/agent/src/share/classes/sun/jvm/hotspot/bugspot/BugSpotAgent.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/bugspot/BugSpotAgent.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,10 +29,8 @@
 import java.rmi.*;
 import sun.jvm.hotspot.*;
 import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.dbx.*;
 import sun.jvm.hotspot.debugger.proc.*;
 import sun.jvm.hotspot.debugger.cdbg.*;
-import sun.jvm.hotspot.debugger.win32.*;
 import sun.jvm.hotspot.debugger.windbg.*;
 import sun.jvm.hotspot.debugger.linux.*;
 import sun.jvm.hotspot.debugger.sparc.*;
@@ -627,104 +625,33 @@
 
     private void setupDebuggerSolaris() {
         setupJVMLibNamesSolaris();
-        String prop = System.getProperty("sun.jvm.hotspot.debugger.useProcDebugger");
-        if (prop != null && !prop.equals("false")) {
-            ProcDebuggerLocal dbg = new ProcDebuggerLocal(null, true);
-            debugger = dbg;
-            attachDebugger();
-
-            // Set up CPU-dependent stuff
-            if (cpu.equals("x86")) {
-                machDesc = new MachineDescriptionIntelX86();
-            } else if (cpu.equals("sparc")) {
-                int addressSize = dbg.getRemoteProcessAddressSize();
-                if (addressSize == -1) {
-                    throw new DebuggerException("Error occurred while trying to determine the remote process's address size");
-                }
+        ProcDebuggerLocal dbg = new ProcDebuggerLocal(null, true);
+        debugger = dbg;
+        attachDebugger();
 
-                if (addressSize == 32) {
-                    machDesc = new MachineDescriptionSPARC32Bit();
-                } else if (addressSize == 64) {
-                    machDesc = new MachineDescriptionSPARC64Bit();
-                } else {
-                    throw new DebuggerException("Address size " + addressSize + " is not supported on SPARC");
-                }
-            } else if (cpu.equals("amd64")) {
-                machDesc = new MachineDescriptionAMD64();
-            } else {
-                throw new DebuggerException("Solaris only supported on sparc/sparcv9/x86/amd64");
-            }
-
-            dbg.setMachineDescription(machDesc);
-            return;
-        } else {
-            String dbxPathName;
-            String dbxPathPrefix;
-            String dbxSvcAgentDSOPathName;
-            String dbxSvcAgentDSOPathPrefix;
-            String[] dbxSvcAgentDSOPathNames = null;
-
-            // use path names/prefixes specified on command
-            dbxPathName = System.getProperty("dbxPathName");
-            if (dbxPathName == null) {
-                dbxPathPrefix = System.getProperty("dbxPathPrefix");
-                if (dbxPathPrefix == null) {
-                    dbxPathPrefix = defaultDbxPathPrefix;
-                }
-                dbxPathName = dbxPathPrefix + fileSep + os + fileSep + cpu + fileSep + "bin" + fileSep + "dbx";
+        // Set up CPU-dependent stuff
+        if (cpu.equals("x86")) {
+            machDesc = new MachineDescriptionIntelX86();
+        } else if (cpu.equals("sparc")) {
+            int addressSize = dbg.getRemoteProcessAddressSize();
+            if (addressSize == -1) {
+                throw new DebuggerException("Error occurred while trying to determine the remote process's address size");
             }
 
-            dbxSvcAgentDSOPathName = System.getProperty("dbxSvcAgentDSOPathName");
-            if (dbxSvcAgentDSOPathName != null) {
-                dbxSvcAgentDSOPathNames = new String[] { dbxSvcAgentDSOPathName } ;
+            if (addressSize == 32) {
+                machDesc = new MachineDescriptionSPARC32Bit();
+            } else if (addressSize == 64) {
+                machDesc = new MachineDescriptionSPARC64Bit();
             } else {
-                dbxSvcAgentDSOPathPrefix = System.getProperty("dbxSvcAgentDSOPathPrefix");
-                if (dbxSvcAgentDSOPathPrefix == null) {
-                    dbxSvcAgentDSOPathPrefix = defaultDbxSvcAgentDSOPathPrefix;
-                }
-                if (cpu.equals("sparc")) {
-                    dbxSvcAgentDSOPathNames = new String[] {
-                        // FIXME: bad hack for SPARC v9. This is necessary because
-                        // there are two dbx executables on SPARC, one for v8 and one
-                        // for v9, and it isn't obvious how to tell the two apart
-                        // using the dbx command line. See
-                        // DbxDebuggerLocal.importDbxModule().
-                        dbxSvcAgentDSOPathPrefix + fileSep + os + fileSep + cpu + "v9" + fileSep + "lib" + fileSep + "libsvc_agent_dbx.so",
-                        dbxSvcAgentDSOPathPrefix + fileSep + os + fileSep + cpu + fileSep + "lib" + fileSep + "libsvc_agent_dbx.so",
-                    };
-                } else {
-                    dbxSvcAgentDSOPathNames = new String[] {
-                        dbxSvcAgentDSOPathPrefix + fileSep + os + fileSep + cpu + fileSep + "lib" + fileSep + "libsvc_agent_dbx.so"
-                    };
-                }
+                throw new DebuggerException("Address size " + addressSize + " is not supported on SPARC");
             }
-            // Note we do not use a cache for the local debugger in server
-            // mode; it's taken care of on the client side
-            DbxDebuggerLocal dbg = new DbxDebuggerLocal(null, dbxPathName, dbxSvcAgentDSOPathNames, !isServer);
-            debugger = dbg;
-
-            attachDebugger();
+        } else if (cpu.equals("amd64")) {
+            machDesc = new MachineDescriptionAMD64();
+        } else {
+            throw new DebuggerException("Solaris only supported on sparc/sparcv9/x86/amd64");
+        }
 
-            // Set up CPU-dependent stuff
-            if (cpu.equals("x86")) {
-                machDesc = new MachineDescriptionIntelX86();
-            } else if (cpu.equals("sparc")) {
-                int addressSize = dbg.getRemoteProcessAddressSize();
-                if (addressSize == -1) {
-                    throw new DebuggerException("Error occurred while trying to determine the remote process's address size. It's possible that the Serviceability Agent's dbx module failed to initialize. Examine the standard output and standard error streams from the dbx process for more information.");
-                }
-
-                if (addressSize == 32) {
-                    machDesc = new MachineDescriptionSPARC32Bit();
-                } else if (addressSize == 64) {
-                    machDesc = new MachineDescriptionSPARC64Bit();
-                } else {
-                    throw new DebuggerException("Address size " + addressSize + " is not supported on SPARC");
-                }
-            }
-
-            dbg.setMachineDescription(machDesc);
-        }
+        dbg.setMachineDescription(machDesc);
     }
 
     private void connectRemoteDebugger() throws DebuggerException {
@@ -772,11 +699,7 @@
         // mode; it will be taken care of on the client side (once remote
         // debugging is implemented).
 
-        if (System.getProperty("sun.jvm.hotspot.debugger.useWindbgDebugger") != null) {
-            debugger = new WindbgDebuggerLocal(machDesc, !isServer);
-        } else {
-            debugger = new Win32DebuggerLocal(machDesc, !isServer);
-        }
+        debugger = new WindbgDebuggerLocal(machDesc, !isServer);
 
         attachDebugger();
     }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/ci/ciArrayKlass.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.ci;
+
+import java.io.PrintStream;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class ciArrayKlass extends ciKlass {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("ciArrayKlass");
+    dimensionField = new IntField(type.getJIntField("_dimension"), 0);
+  }
+
+  private static IntField dimensionField;
+
+  public ciArrayKlass(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/ci/ciArrayKlassKlass.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.ci;
+
+import java.io.PrintStream;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class ciArrayKlassKlass extends ciKlassKlass {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("ciArrayKlassKlass");
+  }
+
+
+  public ciArrayKlassKlass(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/ci/ciConstant.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.ci;
+
+import java.io.PrintStream;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class ciConstant extends VMObject {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("ciConstant");
+    valueObjectField = type.getAddressField("_value._object");
+    valueDoubleField = type.getJDoubleField("_value._double");
+    valueFloatField = type.getJFloatField("_value._float");
+    valueLongField = type.getJLongField("_value._long");
+    valueIntField = type.getJIntField("_value._int");
+    typeField = new CIntField(type.getCIntegerField("_type"), 0);
+  }
+
+  private static AddressField valueObjectField;
+  private static JDoubleField valueDoubleField;
+  private static JFloatField valueFloatField;
+  private static JLongField valueLongField;
+  private static JIntField valueIntField;
+  private static CIntField typeField;
+
+  public ciConstant(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/ci/ciEnv.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.ci;
+
+import java.io.PrintStream;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.opto.*;
+import sun.jvm.hotspot.compiler.CompileTask;
+import sun.jvm.hotspot.prims.JvmtiExport;
+import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.utilities.GrowableArray;
+
+public class ciEnv extends VMObject {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("ciEnv");
+    dependenciesField = type.getAddressField("_dependencies");
+    factoryField = type.getAddressField("_factory");
+    compilerDataField = type.getAddressField("_compiler_data");
+    taskField = type.getAddressField("_task");
+    systemDictionaryModificationCounterField = new CIntField(type.getCIntegerField("_system_dictionary_modification_counter"), 0);
+  }
+
+  private static AddressField dependenciesField;
+  private static AddressField factoryField;
+  private static AddressField compilerDataField;
+  private static AddressField taskField;
+  private static CIntField systemDictionaryModificationCounterField;
+
+  public ciEnv(Address addr) {
+    super(addr);
+  }
+
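+  // Wraps _compiler_data as a C2 Compile object; this is only meaningful while
+  // a C2 compilation is in progress in this environment.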
+  public Compile compilerData() {
+    return new Compile(compilerDataField.getValue(this.getAddress()));
+  }
+
+  public ciObjectFactory factory() {
+    return new ciObjectFactory(factoryField.getValue(this.getAddress()));
+  }
+
+  public CompileTask task() {
+    return new CompileTask(taskField.getValue(this.getAddress()));
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/ci/ciField.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.ci;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class ciField extends VMObject {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("ciField");
+    constantValueField = type.getAddressField("_constant_value");
+    isConstantField = type.getAddressField("_is_constant");
+    offsetField = new CIntField(type.getCIntegerField("_offset"), 0);
+    signatureField = type.getAddressField("_signature");
+    nameField = type.getAddressField("_name");
+    holderField = type.getAddressField("_holder");
+  }
+
+  private static AddressField constantValueField;
+  private static AddressField isConstantField;
+  private static CIntField offsetField;
+  private static AddressField signatureField;
+  private static AddressField nameField;
+  private static AddressField holderField;
+
+  public ciField(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/ci/ciInstance.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.ci;
+
+import java.io.PrintStream;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class ciInstance extends ciObject {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("ciInstance");
+  }
+
+
+  public ciInstance(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/ci/ciInstanceKlass.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.ci;
+
+import java.io.*;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.memory.SystemDictionary;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+import sun.jvm.hotspot.types.WrongTypeException;
+
+public class ciInstanceKlass extends ciKlass {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("ciInstanceKlass");
+    initStateField = new CIntField(type.getCIntegerField("_init_state"), 0);
+    isSharedField = new CIntField(type.getCIntegerField("_is_shared"), 0);
+    CLASS_STATE_LINKED = db.lookupIntConstant("instanceKlass::linked").intValue();
+    CLASS_STATE_FULLY_INITIALIZED = db.lookupIntConstant("instanceKlass::fully_initialized").intValue();
+  }
+
+  private static CIntField initStateField;
+  private static CIntField isSharedField;
+  private static int CLASS_STATE_LINKED;
+  private static int CLASS_STATE_FULLY_INITIALIZED;
+
+  public ciInstanceKlass(Address addr) {
+    super(addr);
+  }
+
+  public int initState() {
+    int initState = (int)initStateField.getValue(getAddress());
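+    // For shared (CDS) classes the cached _init_state may be stale, so re-read
+    // the state from the underlying instanceKlass oop.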
+    if (isShared() && initState < CLASS_STATE_LINKED) {
+      InstanceKlass ik = (InstanceKlass)getOop();
+      initState = ik.getInitStateAsInt();
+    }
+    return initState;
+  }
+
+  public boolean isShared() {
+    return isSharedField.getValue(getAddress()) != 0;
+  }
+
+  public boolean isLinked() {
+    return initState() >= CLASS_STATE_LINKED;
+  }
+
+  public boolean isInitialized() {
+    return initState() == CLASS_STATE_FULLY_INITIALIZED;
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/ci/ciInstanceKlassKlass.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.ci;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class ciInstanceKlassKlass extends ciKlassKlass {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("ciInstanceKlassKlass");
+  }
+
+
+  public ciInstanceKlassKlass(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/ci/ciKlass.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.ci;
+
+import java.io.PrintStream;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class ciKlass extends ciType {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("ciKlass");
+    nameField = type.getAddressField("_name");
+  }
+
+  private static AddressField nameField;
+
+  public String name() {
+    ciSymbol sym = new ciSymbol(nameField.getValue(getAddress()));
+    return sym.asUtf88();
+  }
+
+  public ciKlass(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/ci/ciKlassKlass.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.ci;
+
+import java.io.PrintStream;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class ciKlassKlass extends ciKlass {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("ciKlassKlass");
+  }
+
+
+  public ciKlassKlass(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/ci/ciMethod.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.ci;
+
+import java.io.*;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.code.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class ciMethod extends ciObject {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("ciMethod");
+    interpreterThrowoutCountField = new CIntField(type.getCIntegerField("_interpreter_throwout_count"), 0);
+    interpreterInvocationCountField = new CIntField(type.getCIntegerField("_interpreter_invocation_count"), 0);
+    try {
+        // XXX
+        instructionsSizeField = new CIntField(type.getCIntegerField("_instructions_size"), 0);
+    } catch (Exception e) {
+    }
+  }
+
+  private static CIntField interpreterThrowoutCountField;
+  private static CIntField interpreterInvocationCountField;
+  private static CIntField instructionsSizeField;
+
+  public ciMethod(Address addr) {
+    super(addr);
+  }
+
+  public Method method() {
+    return (Method)getOop();
+  }
+
+  public int interpreterThrowoutCount() {
+    return (int) interpreterThrowoutCountField.getValue(getAddress());
+  }
+
+  public int interpreterInvocationCount() {
+    return (int) interpreterInvocationCountField.getValue(getAddress());
+  }
+
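+  // _instructions_size is not present in every VM build (see the guarded
+  // lookup in initialize()); when it is missing, approximate the size from
+  // the method's current nmethod instead.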
+  public int instructionsSize() {
+    if (instructionsSizeField == null) {
+      // XXX
+      Method method = (Method)getOop();
+      NMethod nm = method.getNativeMethod();
+      if (nm != null) return (int)nm.codeEnd().minus(nm.getVerifiedEntryPoint());
+      return 0;
+    }
+    return (int) instructionsSizeField.getValue(getAddress());
+  }
+
+  public void printShortName(PrintStream st) {
+    Method method = (Method)getOop();
+    st.printf(" %s::%s", method.getMethodHolder().getName().asString().replace('/', '.'),
+              method.getName().asString());
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/ci/ciMethodData.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.ci;
+
+import java.io.*;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class ciMethodData extends ciObject {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("ciMethodData");
+    origField = type.getAddressField("_orig");
+    currentMileageField = new CIntField(type.getCIntegerField("_current_mileage"), 0);
+    argReturnedField = new CIntField(type.getCIntegerField("_arg_returned"), 0);
+    argStackField = new CIntField(type.getCIntegerField("_arg_stack"), 0);
+    argLocalField = new CIntField(type.getCIntegerField("_arg_local"), 0);
+    eflagsField = new CIntField(type.getCIntegerField("_eflags"), 0);
+    hintDiField = new CIntField(type.getCIntegerField("_hint_di"), 0);
+    dataField = type.getAddressField("_data");
+    extraDataSizeField = new CIntField(type.getCIntegerField("_extra_data_size"), 0);
+    dataSizeField = new CIntField(type.getCIntegerField("_data_size"), 0);
+    stateField = new CIntField(type.getCIntegerField("_state"), 0);
+    sizeofMethodDataOopDesc = (int)db.lookupType("methodDataOopDesc").getSize();
+  }
+
+  private static AddressField origField;
+  private static CIntField currentMileageField;
+  private static CIntField argReturnedField;
+  private static CIntField argStackField;
+  private static CIntField argLocalField;
+  private static CIntField eflagsField;
+  private static CIntField hintDiField;
+  private static AddressField dataField;
+  private static CIntField extraDataSizeField;
+  private static CIntField dataSizeField;
+  private static CIntField stateField;
+  private static int sizeofMethodDataOopDesc;
+
+  public ciMethodData(Address addr) {
+    super(addr);
+  }
+
+  private byte[] fetchDataAt(Address base, long size) {
+    byte[] result = new byte[(int)size];
+    for (int i = 0; i < size; i++) {
+      result[i] = base.getJByteAt(i);
+    }
+    return result;
+  }
+
+  public byte[] orig() {
+    // fetch a copy of the original methodDataOopDesc header stored in _orig
+    Address base = getAddress().addOffsetTo(origField.getOffset());
+    byte[] result = new byte[MethodData.sizeofMethodDataOopDesc];
+    for (int i = 0; i < MethodData.sizeofMethodDataOopDesc; i++) {
+      result[i] = base.getJByteAt(i);
+    }
+    return result;
+  }
+
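+  // The profile data area is a sequence of intptr_t cells; each cell is
+  // read as an Address and converted to a raw long via minus(null).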
+  public long[] data() {
+    // Read the data as an array of intptr_t elements
+    Address base = dataField.getValue(getAddress());
+    int elements = dataSize() / MethodData.cellSize;
+    long[] result = new long[elements];
+    for (int i = 0; i < elements; i++) {
+      Address value = base.getAddressAt(i * MethodData.cellSize);
+      if (value != null) {
+        result[i] = value.minus(null);
+      }
+    }
+    return result;
+  }
+
+  int dataSize() {
+    return (int)dataSizeField.getValue(getAddress());
+  }
+
+  int state() {
+    return (int)stateField.getValue(getAddress());
+  }
+
+  int currentMileage() {
+    return (int)currentMileageField.getValue(getAddress());
+  }
+
+  boolean outOfBounds(int dataIndex) {
+    return dataIndex >= dataSize();
+  }
+
+  ProfileData dataAt(int dataIndex) {
+    if (outOfBounds(dataIndex)) {
+      return null;
+    }
+    DataLayout dataLayout = new DataLayout(dataField.getValue(getAddress()), dataIndex);
+
+    switch (dataLayout.tag()) {
+    case DataLayout.noTag:
+    default:
+      throw new InternalError();
+    case DataLayout.bitDataTag:
+      return new BitData(dataLayout);
+    case DataLayout.counterDataTag:
+      return new CounterData(dataLayout);
+    case DataLayout.jumpDataTag:
+      return new JumpData(dataLayout);
+    case DataLayout.receiverTypeDataTag:
+      return new ciReceiverTypeData(dataLayout);
+    case DataLayout.virtualCallDataTag:
+      return new ciVirtualCallData(dataLayout);
+    case DataLayout.retDataTag:
+      return new RetData(dataLayout);
+    case DataLayout.branchDataTag:
+      return new BranchData(dataLayout);
+    case DataLayout.multiBranchDataTag:
+      return new MultiBranchData(dataLayout);
+    }
+  }
+
+  int dpToDi(int dp) {
+    return dp;
+  }
+
+  int firstDi() { return 0; }
+  ProfileData firstData() { return dataAt(firstDi()); }
+  ProfileData nextData(ProfileData current) {
+    int currentIndex = dpToDi(current.dp());
+    int nextIndex = currentIndex + current.sizeInBytes();
+    return dataAt(nextIndex);
+  }
+  boolean isValid(ProfileData current) { return current != null; }
+
+  public void printDataOn(PrintStream st) {
+    ProfileData data = firstData();
+    for ( ; isValid(data); data = nextData(data)) {
+      st.print(dpToDi(data.dp()));
+      st.print(" ");
+      // st->fillTo(6);
+      data.printDataOn(st);
+    }
+  }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/ci/ciMethodKlass.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.ci;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class ciMethodKlass extends ciKlass {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("ciMethodKlass");
+  }
+
+
+  public ciMethodKlass(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/ci/ciObjArrayKlass.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.ci;
+
+import java.io.PrintStream;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class ciObjArrayKlass extends ciArrayKlass {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("ciObjArrayKlass");
+    elementKlassField = type.getAddressField("_element_klass");
+    baseElementKlassField = type.getAddressField("_base_element_klass");
+  }
+
+  private static AddressField elementKlassField;
+  private static AddressField baseElementKlassField;
+
+  public ciObjArrayKlass(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/ci/ciObjArrayKlassKlass.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.ci;
+
+import java.io.PrintStream;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class ciObjArrayKlassKlass extends ciArrayKlassKlass {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("ciObjArrayKlassKlass");
+  }
+
+
+  public ciObjArrayKlassKlass(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/ci/ciObject.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.ci;
+
+import java.io.*;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class ciObject extends VMObject {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("ciObject");
+    identField = new CIntField(type.getCIntegerField("_ident"), 0);
+    klassField = type.getAddressField("_klass");
+    handleField = type.getAddressField("_handle");
+  }
+
+  private static CIntField identField;
+  private static AddressField klassField;
+  private static AddressField handleField;
+
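+  // _handle refers to a handle cell whose first word is the oop itself, so
+  // dereference it once before wrapping it as an Oop.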
+  public Oop getOop() {
+    OopHandle oh =  handleField.getValue(getAddress()).getOopHandleAt(0);
+    return VM.getVM().getObjectHeap().newOop(oh);
+  }
+
+  public ciObject(Address addr) {
+    super(addr);
+  }
+
+  public void printOn(PrintStream out) {
+    getOop().printValueOn(out);
+    out.println();
+  }
+
+  public String toString() {
+    return getOop().toString();
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/ci/ciObjectFactory.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.ci;
+
+import java.lang.reflect.Constructor;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.utilities.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class ciObjectFactory extends VMObject {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("ciObjectFactory");
+    unloadedMethodsField = type.getAddressField("_unloaded_methods");
+    ciObjectsField = type.getAddressField("_ci_objects");
+    symbolsField = type.getAddressField("_symbols");
+
+    ciObjectConstructor = new VirtualBaseConstructor<ciObject>(db, db.lookupType("ciObject"), "sun.jvm.hotspot.ci", ciObject.class);
+    ciSymbolConstructor = new VirtualBaseConstructor<ciSymbol>(db, db.lookupType("ciSymbol"), "sun.jvm.hotspot.ci", ciSymbol.class);
+  }
+
+  private static AddressField unloadedMethodsField;
+  private static AddressField ciObjectsField;
+  private static AddressField symbolsField;
+
+  private static VirtualBaseConstructor<ciObject> ciObjectConstructor;
+  private static VirtualBaseConstructor<ciSymbol> ciSymbolConstructor;
+
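+  // Wraps the ciObject at addr with the SA class matching its dynamic C++
+  // type (ciMethod, ciKlass, ...); returns null for a null address.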
+  public static ciObject get(Address addr) {
+    if (addr == null) return null;
+
+    return (ciObject)ciObjectConstructor.instantiateWrapperFor(addr);
+  }
+
+  public GrowableArray<ciObject> objects() {
+    return GrowableArray.create(ciObjectsField.getValue(getAddress()), ciObjectConstructor);
+  }
+
+  public GrowableArray<ciSymbol> symbols() {
+    return GrowableArray.create(symbolsField.getValue(getAddress()), ciSymbolConstructor);
+  }
+
+  public ciObjectFactory(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/ci/ciReceiverTypeData.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.ci;
+
+import java.io.*;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class ciReceiverTypeData extends ReceiverTypeData {
+  public ciReceiverTypeData(DataLayout data) {
+    super(data);
+  }
+
+  public Klass receiver(int row) {
+      throw new InternalError("should not call");
+  }
+
+  public ciKlass receiverAt(int row) {
+    //assert((uint)row < rowLimit(), "oob");
+    ciObject recv = ciObjectFactory.get(addressAt(receiverCellIndex(row)));
+    if (recv != null && !(recv instanceof ciKlass)) {
+      System.err.println(recv);
+    }
+    //assert(recv == NULL || recv->isKlass(), "wrong type");
+    return (ciKlass)recv;
+  }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/ci/ciSymbol.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.ci;
+
+import java.io.PrintStream;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class ciSymbol extends VMObject {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type = db.lookupType("ciSymbol");
+    identField = type.getCIntegerField("_ident");
+    symbolField = type.getAddressField("_symbol");
+  }
+
+  private static AddressField symbolField;
+  private static CIntegerField identField;
+
+  public String asUtf88() {
+    Symbol sym = Symbol.create(symbolField.getValue(getAddress()));
+    return sym.asString();
+  }
+
+  public ciSymbol(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/ci/ciType.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.ci;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class ciType extends ciObject {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("ciType");
+    basicTypeField = new CIntField(type.getCIntegerField("_basic_type"), 0);
+  }
+
+  private static CIntField basicTypeField;
+
+  public ciType(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/ci/ciTypeArrayKlass.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.ci;
+
+import java.io.PrintStream;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class ciTypeArrayKlass extends ciArrayKlass {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("ciTypeArrayKlass");
+  }
+
+  public ciTypeArrayKlass(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/ci/ciTypeArrayKlassKlass.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.ci;
+
+import java.io.PrintStream;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class ciTypeArrayKlassKlass extends ciArrayKlassKlass {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("ciTypeArrayKlassKlass");
+  }
+
+
+  public ciTypeArrayKlassKlass(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/ci/ciVirtualCallData.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.ci;
+
+import java.io.*;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class ciVirtualCallData extends VirtualCallData {
+  public ciVirtualCallData(DataLayout data) {
+    super(data);
+  }
+
+  public Klass receiver(int row) {
+      throw new InternalError("should not call");
+  }
+
+  public ciKlass receiverAt(int row) {
+    //assert((uint)row < rowLimit(), "oob");
+    ciObject recv = ciObjectFactory.get(addressAt(receiverCellIndex(row)));
+    if (recv != null && !(recv instanceof ciKlass)) {
+      System.err.println(recv);
+    }
+    //assert(recv == NULL || recv->isKlass(), "wrong type");
+    return (ciKlass)recv;
+  }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/code/CodeBlob.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/code/CodeBlob.java	Thu Dec 22 15:46:11 2011 +0000
@@ -102,6 +102,11 @@
   /** On-Stack Replacement method */
   public boolean isOSRMethod()          { return false; }
 
+  public NMethod asNMethodOrNull() {
+    if (isNMethod()) return (NMethod)this;
+    return null;
+  }
+
   // Boundaries
   public Address headerBegin() {
     return addr;
@@ -195,7 +200,7 @@
   }
 
   // Returns true, if the next frame is responsible for GC'ing oops passed as arguments
-  public boolean callerMustGCArguments(JavaThread thread) { return false; }
+  public boolean callerMustGCArguments() { return false; }
 
   public String getName() {
     return CStringUtilities.getString(nameField.getValue(addr));
--- a/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java	Thu Dec 22 15:46:11 2011 +0000
@@ -59,6 +59,7 @@
     virtualConstructor.addMapping("RuntimeStub", RuntimeStub.class);
     virtualConstructor.addMapping("RicochetBlob", RicochetBlob.class);
     virtualConstructor.addMapping("AdapterBlob", AdapterBlob.class);
+    virtualConstructor.addMapping("MethodHandlesAdapterBlob", MethodHandlesAdapterBlob.class);
     virtualConstructor.addMapping("SafepointBlob", SafepointBlob.class);
     virtualConstructor.addMapping("DeoptimizationBlob", DeoptimizationBlob.class);
     if (VM.getVM().isServerCompiler()) {
@@ -126,6 +127,10 @@
       Assert.that(result.blobContains(start) || result.blobContains(start.addOffsetTo(8)),
                                                                     "found wrong CodeBlob");
     }
+    if (result.isRicochetBlob()) {
+      // This should probably be done for other SingletonBlobs
+      return VM.getVM().ricochetBlob();
+    }
     return result;
   }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/code/MethodHandlesAdapterBlob.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.code;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+
+public class MethodHandlesAdapterBlob extends AdapterBlob {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static void initialize(TypeDataBase db) {
+    Type type = db.lookupType("MethodHandlesAdapterBlob");
+
+    // FIXME: add any needed fields
+  }
+
+  public MethodHandlesAdapterBlob(Address addr) {
+    super(addr);
+  }
+
+  public boolean isMethodHandlesAdapterBlob() {
+    return true;
+  }
+
+  public String getName() {
+    return "MethodHandlesAdapterBlob: " + super.getName();
+  }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,6 +46,7 @@
   /** Offsets for different nmethod parts */
   private static CIntegerField exceptionOffsetField;
   private static CIntegerField deoptOffsetField;
+  private static CIntegerField deoptMhOffsetField;
   private static CIntegerField origPCOffsetField;
   private static CIntegerField stubOffsetField;
   private static CIntegerField oopsOffsetField;
@@ -95,6 +96,7 @@
 
     exceptionOffsetField        = type.getCIntegerField("_exception_offset");
     deoptOffsetField            = type.getCIntegerField("_deoptimize_offset");
+    deoptMhOffsetField          = type.getCIntegerField("_deoptimize_mh_offset");
     origPCOffsetField           = type.getCIntegerField("_orig_pc_offset");
     stubOffsetField             = type.getCIntegerField("_stub_offset");
     oopsOffsetField             = type.getCIntegerField("_oops_offset");
@@ -136,10 +138,11 @@
   /** Boundaries for different parts */
   public Address constantsBegin()       { return contentBegin();                                     }
   public Address constantsEnd()         { return getEntryPoint();                                    }
-  public Address instsBegin()           { return codeBegin();                                       }
+  public Address instsBegin()           { return codeBegin();                                        }
   public Address instsEnd()             { return headerBegin().addOffsetTo(getStubOffset());         }
   public Address exceptionBegin()       { return headerBegin().addOffsetTo(getExceptionOffset());    }
-  public Address deoptBegin()           { return headerBegin().addOffsetTo(getDeoptOffset());        }
+  public Address deoptHandlerBegin()    { return headerBegin().addOffsetTo(getDeoptOffset());        }
+  public Address deoptMhHandlerBegin()  { return headerBegin().addOffsetTo(getDeoptMhOffset());      }
   public Address stubBegin()            { return headerBegin().addOffsetTo(getStubOffset());         }
   public Address stubEnd()              { return headerBegin().addOffsetTo(getOopsOffset());         }
   public Address oopsBegin()            { return headerBegin().addOffsetTo(getOopsOffset());         }
@@ -187,6 +190,8 @@
   public boolean handlerTableContains(Address addr) { return handlerTableBegin().lessThanOrEqual(addr) && handlerTableEnd().greaterThan(addr); }
   public boolean nulChkTableContains (Address addr) { return nulChkTableBegin() .lessThanOrEqual(addr) && nulChkTableEnd() .greaterThan(addr); }
 
+  public int getOopsLength() { return (int) (oopsSize() / VM.getVM().getOopSize()); }
+
   /** Entry points */
   public Address getEntryPoint()         { return entryPointField.getValue(addr);         }
   public Address getVerifiedEntryPoint() { return verifiedEntryPointField.getValue(addr); }
@@ -195,7 +200,7 @@
   public OopHandle getOopAt(int index) {
     if (index == 0) return null;
     if (Assert.ASSERTS_ENABLED) {
-      Assert.that(index > 0 && index <= oopsSize(), "must be a valid non-zero index");
+      Assert.that(index > 0 && index <= getOopsLength(), "must be a valid non-zero index");
     }
     return oopsBegin().getOopHandleAt((index - 1) * VM.getVM().getOopSize());
   }
@@ -250,6 +255,22 @@
     return (int) scavengeRootStateField.getValue(addr);
   }
 
+  // MethodHandle
+  public boolean isMethodHandleReturn(Address returnPc) {
+    // Bit fields are hard to read from Java, and the flag is only there for
+    // performance, so just go directly to the PCDesc.
+    // if (!hasMethodHandleInvokes())  return false;
+    PCDesc pd = getPCDescAt(returnPc);
+    if (pd == null)
+      return false;
+    return pd.isMethodHandleInvoke();
+  }
+
+  // Deopt
+  // Return true if the PC is one we would expect when the frame is being deopted.
+  public boolean isDeoptPc      (Address pc) { return isDeoptEntry(pc) || isDeoptMhEntry(pc); }
+  public boolean isDeoptEntry   (Address pc) { return pc.equals(deoptHandlerBegin()); }
+  public boolean isDeoptMhEntry (Address pc) { return pc.equals(deoptMhHandlerBegin()); }
 
   /** Tells whether frames described by this nmethod can be
       deoptimized. Note: native wrappers cannot be deoptimized. */
@@ -388,6 +409,7 @@
   private int getEntryBCI()           { return (int) entryBCIField          .getValue(addr); }
   private int getExceptionOffset()    { return (int) exceptionOffsetField   .getValue(addr); }
   private int getDeoptOffset()        { return (int) deoptOffsetField       .getValue(addr); }
+  private int getDeoptMhOffset()      { return (int) deoptMhOffsetField     .getValue(addr); }
   private int getStubOffset()         { return (int) stubOffsetField        .getValue(addr); }
   private int getOopsOffset()         { return (int) oopsOffsetField        .getValue(addr); }
   private int getScopesDataOffset()   { return (int) scopesDataOffsetField  .getValue(addr); }
--- a/agent/src/share/classes/sun/jvm/hotspot/code/PCDesc.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/code/PCDesc.java	Thu Dec 22 15:46:11 2011 +0000
@@ -38,6 +38,9 @@
   private static CIntegerField scopeDecodeOffsetField;
   private static CIntegerField objDecodeOffsetField;
   private static CIntegerField pcFlagsField;
+  private static int reexecuteMask;
+  private static int isMethodHandleInvokeMask;
+  private static int returnOopMask;
 
   static {
     VM.registerVMInitializedObserver(new Observer() {
@@ -54,6 +57,10 @@
     scopeDecodeOffsetField = type.getCIntegerField("_scope_decode_offset");
     objDecodeOffsetField   = type.getCIntegerField("_obj_decode_offset");
     pcFlagsField           = type.getCIntegerField("_flags");
+
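+    // The PCDesc flag bits are exported by the VM as named integer constants,
+    // so look them up rather than hard-coding bit positions.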
+    reexecuteMask            = db.lookupIntConstant("PcDesc::PCDESC_reexecute");
+    isMethodHandleInvokeMask = db.lookupIntConstant("PcDesc::PCDESC_is_method_handle_invoke");
+    returnOopMask            = db.lookupIntConstant("PcDesc::PCDESC_return_oop");
   }
 
   public PCDesc(Address addr) {
@@ -81,7 +88,12 @@
 
   public boolean getReexecute() {
     int flags = (int)pcFlagsField.getValue(addr);
-    return ((flags & 0x1)== 1); //first is the reexecute bit
+    return (flags & reexecuteMask) != 0;
+  }
+
+  public boolean isMethodHandleInvoke() {
+    int flags = (int)pcFlagsField.getValue(addr);
+    return (flags & isMethodHandleInvokeMask) != 0;
   }
 
   public void print(NMethod code) {
--- a/agent/src/share/classes/sun/jvm/hotspot/code/RicochetBlob.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/code/RicochetBlob.java	Thu Dec 22 15:46:11 2011 +0000
@@ -41,11 +41,15 @@
   }
 
   private static void initialize(TypeDataBase db) {
-    // Type type = db.lookupType("RicochetBlob");
+    Type type = db.lookupType("RicochetBlob");
 
-    // FIXME: add any needed fields
+    bounceOffsetField                = type.getCIntegerField("_bounce_offset");
+    exceptionOffsetField             = type.getCIntegerField("_exception_offset");
   }
 
+  private static CIntegerField bounceOffsetField;
+  private static CIntegerField exceptionOffsetField;
+
   public RicochetBlob(Address addr) {
     super(addr);
   }
@@ -53,4 +57,14 @@
   public boolean isRicochetBlob() {
     return true;
   }
+
+  public Address bounceAddr() {
+    return codeBegin().addOffsetTo(bounceOffsetField.getValue(addr));
+  }
+
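+  // Accepts either the bounce pc itself or a return address offset from it
+  // by Frame.pcReturnOffset().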
+  public boolean returnsToBounceAddr(Address pc) {
+    Address bouncePc = bounceAddr();
+    return (pc.equals(bouncePc) || pc.addOffsetTo(Frame.pcReturnOffset()).equals(bouncePc));
+  }
+
 }
--- a/agent/src/share/classes/sun/jvm/hotspot/code/RuntimeStub.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/code/RuntimeStub.java	Thu Dec 22 15:46:11 2011 +0000
@@ -30,6 +30,8 @@
 import sun.jvm.hotspot.types.*;
 
 public class RuntimeStub extends CodeBlob {
+  private static CIntegerField callerMustGCArgumentsField;
+
   static {
     VM.registerVMInitializedObserver(new Observer() {
         public void update(Observable o, Object data) {
@@ -40,6 +42,7 @@
 
   private static void initialize(TypeDataBase db) {
     Type type = db.lookupType("RuntimeStub");
+    callerMustGCArgumentsField                = type.getCIntegerField("_caller_must_gc_arguments");
 
     // FIXME: add any needed fields
   }
@@ -52,6 +55,11 @@
     return true;
   }
 
+  public boolean callerMustGCArguments() {
+    return callerMustGCArgumentsField.getValue(addr) != 0;
+  }
+
+
   public String getName() {
     return "RuntimeStub: " + super.getName();
   }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/compiler/CompileTask.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.compiler;
+
+import java.io.PrintStream;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.opto.*;
+import sun.jvm.hotspot.prims.JvmtiExport;
+import sun.jvm.hotspot.types.*;
+
+public class CompileTask extends VMObject {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("CompileTask");
+    methodField = type.getAddressField("_method");
+    osrBciField = new CIntField(type.getCIntegerField("_osr_bci"), 0);
+  }
+
+  private static AddressField methodField;
+  private static CIntField osrBciField;
+
+  public CompileTask(Address addr) {
+    super(addr);
+  }
+
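+  // _method holds a handle; its first word is the methodOop for this task.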
+  public Method method() {
+    OopHandle oh =  methodField.getValue(getAddress()).getOopHandleAt(0);
+    return (Method)VM.getVM().getObjectHeap().newOop(oh);
+  }
+
+  public int osrBci() {
+    return (int)osrBciField.getValue(getAddress());
+  }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/compiler/OopMapSet.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/compiler/OopMapSet.java	Thu Dec 22 15:46:11 2011 +0000
@@ -246,7 +246,7 @@
     }
 
     // Check if caller must update oop argument
-    regMap.setIncludeArgumentOops(cb.callerMustGCArguments(regMap.getThread()));
+    regMap.setIncludeArgumentOops(cb.callerMustGCArguments());
 
     int nofCallee = 0;
     Address[] locs = new Address[2 * REG_COUNT + 1];
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/AddressException.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/AddressException.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,4 +39,8 @@
   public long getAddress() {
     return addr;
   }
+
+  public String getMessage() {
+    return Long.toHexString(addr);
+  }
 }
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/DbxAddress.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,395 +0,0 @@
-/*
- * Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.debugger.dbx;
-
-import sun.jvm.hotspot.debugger.*;
-
-class DbxAddress implements Address {
-  protected DbxDebugger debugger;
-  protected long addr;
-
-  DbxAddress(DbxDebugger debugger, long addr) {
-    this.debugger = debugger;
-    this.addr = addr;
-  }
-
-  //
-  // Basic Java routines
-  //
-
-  public boolean equals(Object arg) {
-    if (arg == null) {
-      return false;
-    }
-
-    if (!(arg instanceof DbxAddress)) {
-      return false;
-    }
-
-    return (addr == ((DbxAddress) arg).addr);
-  }
-
-  public int hashCode() {
-    // FIXME: suggestions on a better hash code?
-    return (int) addr;
-  }
-
-  public String toString() {
-    return debugger.addressValueToString(addr);
-  }
-
-  //
-  // C/C++-related routines
-  //
-
-  public long getCIntegerAt(long offset, long numBytes, boolean isUnsigned) throws UnalignedAddressException, UnmappedAddressException {
-    return debugger.readCInteger(addr + offset, numBytes, isUnsigned);
-  }
-
-  public Address getAddressAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
-    return debugger.readAddress(addr + offset);
-  }
-  public Address getCompOopAddressAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
-    return debugger.readCompOopAddress(addr + offset);
-  }
-
-  //
-  // Java-related routines
-  //
-
-  public boolean getJBooleanAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
-    return debugger.readJBoolean(addr + offset);
-  }
-
-  public byte getJByteAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
-    return debugger.readJByte(addr + offset);
-  }
-
-  public char getJCharAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
-    return debugger.readJChar(addr + offset);
-  }
-
-  public double getJDoubleAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
-    return debugger.readJDouble(addr + offset);
-  }
-
-  public float getJFloatAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
-    return debugger.readJFloat(addr + offset);
-  }
-
-  public int getJIntAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
-    return debugger.readJInt(addr + offset);
-  }
-
-  public long getJLongAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
-    return debugger.readJLong(addr + offset);
-  }
-
-  public short getJShortAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
-    return debugger.readJShort(addr + offset);
-  }
-
-  public OopHandle getOopHandleAt(long offset)
-    throws UnalignedAddressException, UnmappedAddressException, NotInHeapException {
-    return debugger.readOopHandle(addr + offset);
-  }
-
-  public OopHandle getCompOopHandleAt(long offset)
-    throws UnalignedAddressException, UnmappedAddressException, NotInHeapException {
-    return debugger.readCompOopHandle(addr + offset);
-  }
-
-  // Mutators -- not implemented for now (FIXME)
-  public void setCIntegerAt(long offset, long numBytes, long value) {
-    throw new DebuggerException("Unimplemented");
-  }
-  public void setAddressAt(long offset, Address value) {
-    throw new DebuggerException("Unimplemented");
-  }
-  public void       setJBooleanAt      (long offset, boolean value)
-    throws UnmappedAddressException, UnalignedAddressException {
-    throw new DebuggerException("Unimplemented");
-  }
-  public void       setJByteAt         (long offset, byte value)
-    throws UnmappedAddressException, UnalignedAddressException {
-    throw new DebuggerException("Unimplemented");
-  }
-  public void       setJCharAt         (long offset, char value)
-    throws UnmappedAddressException, UnalignedAddressException {
-    throw new DebuggerException("Unimplemented");
-  }
-  public void       setJDoubleAt       (long offset, double value)
-    throws UnmappedAddressException, UnalignedAddressException {
-    throw new DebuggerException("Unimplemented");
-  }
-  public void       setJFloatAt        (long offset, float value)
-    throws UnmappedAddressException, UnalignedAddressException {
-    throw new DebuggerException("Unimplemented");
-  }
-  public void       setJIntAt          (long offset, int value)
-    throws UnmappedAddressException, UnalignedAddressException {
-    throw new DebuggerException("Unimplemented");
-  }
-  public void       setJLongAt         (long offset, long value)
-    throws UnmappedAddressException, UnalignedAddressException {
-    throw new DebuggerException("Unimplemented");
-  }
-  public void       setJShortAt        (long offset, short value)
-    throws UnmappedAddressException, UnalignedAddressException {
-    throw new DebuggerException("Unimplemented");
-  }
-  public void       setOopHandleAt     (long offset, OopHandle value)
-    throws UnmappedAddressException, UnalignedAddressException {
-    throw new DebuggerException("Unimplemented");
-  }
-
-  //
-  // Arithmetic operations -- necessary evil.
-  //
-
-  public Address    addOffsetTo       (long offset) throws UnsupportedOperationException {
-    long value = addr + offset;
-    if (value == 0) {
-      return null;
-    }
-    return new DbxAddress(debugger, value);
-  }
-
-  public OopHandle  addOffsetToAsOopHandle(long offset) throws UnsupportedOperationException {
-    long value = addr + offset;
-    if (value == 0) {
-      return null;
-    }
-    return new DbxOopHandle(debugger, value);
-  }
-
-  /** (FIXME: any signed/unsigned issues? Should this work for
-      OopHandles?) */
-  public long       minus(Address arg) {
-    if (arg == null) {
-      return addr;
-    }
-    return addr - ((DbxAddress) arg).addr;
-  }
-
-  // Addresses are compared as unsigned 64-bit values via their two's-complement
-  // representation: a "negative" long (high bit set) is larger than any
-  // non-negative long, and values with the same sign compare normally.
-  // The test harness is below in main().
-
-  public boolean    lessThan          (Address arg) {
-    if (arg == null) {
-      return false;
-    }
-    DbxAddress dbxArg = (DbxAddress) arg;
-    if ((addr >= 0) && (dbxArg.addr < 0)) {
-      return true;
-    }
-    if ((addr < 0) && (dbxArg.addr >= 0)) {
-      return false;
-    }
-    return (addr < dbxArg.addr);
-  }
-
-  public boolean    lessThanOrEqual   (Address arg) {
-    if (arg == null) {
-      return false;
-    }
-    DbxAddress dbxArg = (DbxAddress) arg;
-    if ((addr >= 0) && (dbxArg.addr < 0)) {
-      return true;
-    }
-    if ((addr < 0) && (dbxArg.addr >= 0)) {
-      return false;
-    }
-    return (addr <= dbxArg.addr);
-  }
-
-  public boolean    greaterThan       (Address arg) {
-    if (arg == null) {
-      return true;
-    }
-    DbxAddress dbxArg = (DbxAddress) arg;
-    if ((addr >= 0) && (dbxArg.addr < 0)) {
-      return false;
-    }
-    if ((addr < 0) && (dbxArg.addr >= 0)) {
-      return true;
-    }
-    return (addr > dbxArg.addr);
-  }
-
-  public boolean    greaterThanOrEqual(Address arg) {
-    if (arg == null) {
-      return true;
-    }
-    DbxAddress dbxArg = (DbxAddress) arg;
-    if ((addr >= 0) && (dbxArg.addr < 0)) {
-      return false;
-    }
-    if ((addr < 0) && (dbxArg.addr >= 0)) {
-      return true;
-    }
-    return (addr >= dbxArg.addr);
-  }
-
-  public Address    andWithMask(long mask) throws UnsupportedOperationException {
-    long value = addr & mask;
-    if (value == 0) {
-      return null;
-    }
-    return new DbxAddress(debugger, value);
-  }
-
-  public Address    orWithMask(long mask) throws UnsupportedOperationException {
-    long value = addr | mask;
-    if (value == 0) {
-      return null;
-    }
-    return new DbxAddress(debugger, value);
-  }
-
-  public Address    xorWithMask(long mask) throws UnsupportedOperationException {
-    long value = addr ^ mask;
-    if (value == 0) {
-      return null;
-    }
-    return new DbxAddress(debugger, value);
-  }
-
-
-  //--------------------------------------------------------------------------------
-  // Internals only below this point
-  //
-
-  long getValue() {
-    return addr;
-  }
-
-
-  private static void check(boolean arg, String failMessage) {
-    if (!arg) {
-      System.err.println(failMessage + ": FAILED");
-      System.exit(1);
-    }
-  }
-
-  // Test harness
-  public static void main(String[] args) {
-    // p/n indicates whether the interior address is really positive
-    // or negative. In unsigned terms, p1 < p2 < n1 < n2.
-
-    DbxAddress p1 = new DbxAddress(null, 0x7FFFFFFFFFFFFFF0L);
-    DbxAddress p2 = (DbxAddress) p1.addOffsetTo(10);
-    DbxAddress n1 = (DbxAddress) p2.addOffsetTo(10);
-    DbxAddress n2 = (DbxAddress) n1.addOffsetTo(10);
-
-    // lessThan positive tests
-    check(p1.lessThan(p2), "lessThan 1");
-    check(p1.lessThan(n1), "lessThan 2");
-    check(p1.lessThan(n2), "lessThan 3");
-    check(p2.lessThan(n1), "lessThan 4");
-    check(p2.lessThan(n2), "lessThan 5");
-    check(n1.lessThan(n2), "lessThan 6");
-
-    // lessThan negative tests
-    check(!p1.lessThan(p1), "lessThan 7");
-    check(!p2.lessThan(p2), "lessThan 8");
-    check(!n1.lessThan(n1), "lessThan 9");
-    check(!n2.lessThan(n2), "lessThan 10");
-
-    check(!p2.lessThan(p1), "lessThan 11");
-    check(!n1.lessThan(p1), "lessThan 12");
-    check(!n2.lessThan(p1), "lessThan 13");
-    check(!n1.lessThan(p2), "lessThan 14");
-    check(!n2.lessThan(p2), "lessThan 15");
-    check(!n2.lessThan(n1), "lessThan 16");
-
-    // lessThanOrEqual positive tests
-    check(p1.lessThanOrEqual(p1), "lessThanOrEqual 1");
-    check(p2.lessThanOrEqual(p2), "lessThanOrEqual 2");
-    check(n1.lessThanOrEqual(n1), "lessThanOrEqual 3");
-    check(n2.lessThanOrEqual(n2), "lessThanOrEqual 4");
-
-    check(p1.lessThanOrEqual(p2), "lessThanOrEqual 5");
-    check(p1.lessThanOrEqual(n1), "lessThanOrEqual 6");
-    check(p1.lessThanOrEqual(n2), "lessThanOrEqual 7");
-    check(p2.lessThanOrEqual(n1), "lessThanOrEqual 8");
-    check(p2.lessThanOrEqual(n2), "lessThanOrEqual 9");
-    check(n1.lessThanOrEqual(n2), "lessThanOrEqual 10");
-
-    // lessThanOrEqual negative tests
-    check(!p2.lessThanOrEqual(p1), "lessThanOrEqual 11");
-    check(!n1.lessThanOrEqual(p1), "lessThanOrEqual 12");
-    check(!n2.lessThanOrEqual(p1), "lessThanOrEqual 13");
-    check(!n1.lessThanOrEqual(p2), "lessThanOrEqual 14");
-    check(!n2.lessThanOrEqual(p2), "lessThanOrEqual 15");
-    check(!n2.lessThanOrEqual(n1), "lessThanOrEqual 16");
-
-    // greaterThan positive tests
-    check(n2.greaterThan(p1), "greaterThan 1");
-    check(n2.greaterThan(p2), "greaterThan 2");
-    check(n2.greaterThan(n1), "greaterThan 3");
-    check(n1.greaterThan(p1), "greaterThan 4");
-    check(n1.greaterThan(p2), "greaterThan 5");
-    check(p2.greaterThan(p1), "greaterThan 6");
-
-    // greaterThan negative tests
-    check(!p1.greaterThan(p1), "greaterThan 7");
-    check(!p2.greaterThan(p2), "greaterThan 8");
-    check(!n1.greaterThan(n1), "greaterThan 9");
-    check(!n2.greaterThan(n2), "greaterThan 10");
-
-    check(!p1.greaterThan(n2), "greaterThan 11");
-    check(!p2.greaterThan(n2), "greaterThan 12");
-    check(!n1.greaterThan(n2), "greaterThan 13");
-    check(!p1.greaterThan(n1), "greaterThan 14");
-    check(!p2.greaterThan(n1), "greaterThan 15");
-    check(!p1.greaterThan(p2), "greaterThan 16");
-
-    // greaterThanOrEqual positive tests
-    check(p1.greaterThanOrEqual(p1), "greaterThanOrEqual 1");
-    check(p2.greaterThanOrEqual(p2), "greaterThanOrEqual 2");
-    check(n1.greaterThanOrEqual(n1), "greaterThanOrEqual 3");
-    check(n2.greaterThanOrEqual(n2), "greaterThanOrEqual 4");
-
-    check(n2.greaterThanOrEqual(p1), "greaterThanOrEqual 5");
-    check(n2.greaterThanOrEqual(p2), "greaterThanOrEqual 6");
-    check(n2.greaterThanOrEqual(n1), "greaterThanOrEqual 7");
-    check(n1.greaterThanOrEqual(p1), "greaterThanOrEqual 8");
-    check(n1.greaterThanOrEqual(p2), "greaterThanOrEqual 9");
-    check(p2.greaterThanOrEqual(p1), "greaterThanOrEqual 10");
-
-    // greaterThanOrEqual negative tests
-    check(!p1.greaterThanOrEqual(n2), "greaterThanOrEqual 11");
-    check(!p2.greaterThanOrEqual(n2), "greaterThanOrEqual 12");
-    check(!n1.greaterThanOrEqual(n2), "greaterThanOrEqual 13");
-    check(!p1.greaterThanOrEqual(n1), "greaterThanOrEqual 14");
-    check(!p2.greaterThanOrEqual(n1), "greaterThanOrEqual 15");
-    check(!p1.greaterThanOrEqual(p2), "greaterThanOrEqual 16");
-
-    System.err.println("DbxAddress: all tests passed successfully.");
-  }
-}
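The DbxAddress comparison methods removed above order addresses as unsigned 64-bit values by exploiting their two's-complement representation: an address with the high bit set reads as a negative long but is larger, unsigned, than any non-negative one. A minimal, self-contained sketch of the same ordering rule follows; the class name is illustrative only, and the cross-check against Long.compareUnsigned assumes Java 8 or later (the removed code predates it).

    public class UnsignedAddressCompareSketch {
      // Same rule as the removed DbxAddress.lessThan(): a "negative" long
      // (high bit set) is an address in the upper half of the space and is
      // therefore greater, unsigned, than any non-negative long.
      static boolean lessThanUnsigned(long a, long b) {
        if ((a >= 0) && (b < 0)) return true;
        if ((a < 0) && (b >= 0)) return false;
        return a < b;  // same half of the address space: signed compare agrees
      }

      public static void main(String[] args) {
        long p = 0x7FFFFFFFFFFFFFF0L;  // just below the sign bit
        long n = p + 0x20;             // wraps into the "negative" upper half
        System.out.println(lessThanUnsigned(p, n));                                     // true
        System.out.println(lessThanUnsigned(p, n) == (Long.compareUnsigned(p, n) < 0)); // true
      }
    }

The removed test harness in main() exercises exactly this boundary by stepping a value from 0x7FFFFFFFFFFFFFF0 across the sign bit in small increments.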
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/DbxDebugger.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,76 +0,0 @@
-/*
- * Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.debugger.dbx;
-
-import sun.jvm.hotspot.debugger.*;
-
-/** An extension of the JVMDebugger interface with a few additions to
-    support 32-bit vs. 64-bit debugging as well as features required
-    by the architecture-specific subpackages. */
-
-public interface DbxDebugger extends JVMDebugger {
-  public String       addressValueToString(long address) throws DebuggerException;
-  public boolean      readJBoolean(long address) throws DebuggerException;
-  public byte         readJByte(long address) throws DebuggerException;
-  public char         readJChar(long address) throws DebuggerException;
-  public double       readJDouble(long address) throws DebuggerException;
-  public float        readJFloat(long address) throws DebuggerException;
-  public int          readJInt(long address) throws DebuggerException;
-  public long         readJLong(long address) throws DebuggerException;
-  public short        readJShort(long address) throws DebuggerException;
-  public long         readCInteger(long address, long numBytes, boolean isUnsigned)
-    throws DebuggerException;
-  public DbxAddress   readAddress(long address) throws DebuggerException;
-  public DbxAddress   readCompOopAddress(long address) throws DebuggerException;
-  public DbxOopHandle readOopHandle(long address) throws DebuggerException;
-  public DbxOopHandle readCompOopHandle(long address) throws DebuggerException;
-  public long[]       getThreadIntegerRegisterSet(int tid) throws DebuggerException;
-  public Address      newAddress(long value) throws DebuggerException;
-
-  // NOTE: this interface implicitly contains the following methods:
-  // From the Debugger interface via JVMDebugger
-  //   public void attach(int processID) throws DebuggerException;
-  //   public void attach(String executableName, String coreFileName) throws DebuggerException;
-  //   public boolean detach();
-  //   public Address parseAddress(String addressString) throws NumberFormatException;
-  //   public long getAddressValue(Address addr) throws DebuggerException;
-  //   public String getOS();
-  //   public String getCPU();
-  // From the SymbolLookup interface via Debugger and JVMDebugger
-  //   public Address lookup(String objectName, String symbol);
-  //   public OopHandle lookupOop(String objectName, String symbol);
-  // From the JVMDebugger interface
-  //   public void configureJavaPrimitiveTypeSizes(long jbooleanSize,
-  //                                               long jbyteSize,
-  //                                               long jcharSize,
-  //                                               long jdoubleSize,
-  //                                               long jfloatSize,
-  //                                               long jintSize,
-  //                                               long jlongSize,
-  //                                               long jshortSize);
-  // From the ThreadAccess interface via Debugger and JVMDebugger
-  //   public ThreadProxy getThreadForIdentifierAddress(Address addr);
-  //   public ThreadProxy getThreadForThreadId(long id);
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/DbxDebuggerLocal.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,744 +0,0 @@
-/*
- * Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.debugger.dbx;
-
-import java.io.*;
-import java.net.*;
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.dbx.sparc.*;
-import sun.jvm.hotspot.debugger.dbx.x86.*;
-import sun.jvm.hotspot.debugger.cdbg.CDebugger;
-import sun.jvm.hotspot.utilities.*;
-
-/** <P> An implementation of the JVMDebugger interface which sits on
-    top of dbx and relies on the SA's dbx import module for
-    communication with the debugger. </P>
-
-    <P> <B>NOTE</B> that since we have the notion of fetching "Java
-    primitive types" from the remote process (which might have
-    different sizes than we expect) we have a bootstrapping
-    problem. We need to know the sizes of these types before we can
-    fetch them. The current implementation solves this problem by
-    requiring that it be configured with these type sizes before they
-    can be fetched. The readJ(Type) routines here will throw a
-    RuntimeException if they are called before the debugger is
-    configured with the Java primitive type sizes. </P>
-*/
-
-public class DbxDebuggerLocal extends DebuggerBase implements DbxDebugger {
-  // These may be set by DbxDebuggerRemote
-  protected boolean unalignedAccessesOkay;
-  protected DbxThreadFactory threadFactory;
-
-  private String dbxPathName;
-  private String[] dbxSvcAgentDSOPathNames;
-  private Process dbxProcess;
-  private StreamMonitor dbxOutStreamMonitor;
-  private StreamMonitor dbxErrStreamMonitor;
-  private PrintWriter dbxOstr;
-  private PrintWriter out;
-  private InputLexer in;
-  private Socket importModuleSocket;
-  private static final int PORT = 21928;
-  private static final int  LONG_TIMEOUT = 60000;
-  private static final int  DBX_MODULE_NOT_FOUND      = 101;
-  private static final int  DBX_MODULE_LOADED         = 102;
-
-  //--------------------------------------------------------------------------------
-  // Implementation of Debugger interface
-  //
-
-  /** <P> machDesc may be null if it couldn't be determined yet; i.e.,
-      if we're on SPARC, we need to ask the remote process whether
-      we're in 32- or 64-bit mode. </P>
-
-      <P> useCache should be set to true if debugging is being done
-      locally, and to false if the debugger is being created for the
-      purpose of supporting remote debugging. </P> */
-  public DbxDebuggerLocal(MachineDescription machDesc,
-                          String dbxPathName,
-                          String[] dbxSvcAgentDSOPathNames,
-                          boolean useCache) {
-    this.machDesc = machDesc;
-    this.dbxPathName = dbxPathName;
-    this.dbxSvcAgentDSOPathNames = dbxSvcAgentDSOPathNames;
-    int cacheNumPages;
-    int cachePageSize;
-    if (PlatformInfo.getCPU().equals("sparc")) {
-      cacheNumPages = parseCacheNumPagesProperty(2048);
-      cachePageSize = 8192;
-      threadFactory = new DbxSPARCThreadFactory(this);
-    } else if (PlatformInfo.getCPU().equals("x86")) {
-      cacheNumPages = 4096;
-      cachePageSize = 4096;
-      threadFactory = new DbxX86ThreadFactory(this);
-      unalignedAccessesOkay = true;
-    } else {
-      throw new RuntimeException("Thread access for CPU architecture " + PlatformInfo.getCPU() + " not yet supported");
-    }
-    if (useCache) {
-      // Cache portion of the remote process's address space.
-      // Fetching data over the socket connection to dbx is relatively
-      // slow. For now, this cache works best if it covers the entire
-      // heap of the remote process. FIXME: at least should make this
-      // tunable from the outside, i.e., via the UI. This is a 16 MB
-      // cache divided on SPARC into 2048 8K pages and on x86 into
-      // 4096 4K pages; the page size must be adjusted to be the OS's
-      // page size. (FIXME: should pick this up from the debugger.)
-      initCache(cachePageSize, cacheNumPages);
-    }
-  }
-
-  /** Only called by DbxDebuggerRemote */
-  protected DbxDebuggerLocal() {
-  }
-
-  /** FIXME: implement this with a Runtime.exec() of ps followed by
-      parsing of its output */
-  public boolean hasProcessList() throws DebuggerException {
-    return false;
-  }
-
-  public List getProcessList() throws DebuggerException {
-    throw new DebuggerException("Not yet supported");
-  }
-
-  /** From the Debugger interface via JVMDebugger */
-  public synchronized void attach(int processID) throws DebuggerException {
-    try {
-      launchProcess();
-      dbxErrStreamMonitor.addTrigger("dbx: no process", 1);
-      dbxErrStreamMonitor.addTrigger("dbx: Cannot open", 1);
-      dbxErrStreamMonitor.addTrigger("dbx: Cannot find", DBX_MODULE_NOT_FOUND);
-      dbxOstr = new PrintWriter(dbxProcess.getOutputStream(), true);
-      dbxOstr.println("debug - " + processID);
-      dbxOstr.println("kprint -u2 \\(ready\\)");
-      boolean seen = dbxErrStreamMonitor.waitFor("(ready)", LONG_TIMEOUT);
-      if (!seen) {
-        detach();
-        throw new DebuggerException("Timed out while connecting to process " + processID);
-      }
-      List retVals = dbxErrStreamMonitor.getTriggersSeen();
-      if (retVals.contains(new Integer(1))) {
-        detach();
-        throw new DebuggerException("No such process " + processID);
-      }
-
-      // Throws DebuggerException upon failure
-      importDbxModule();
-
-      dbxOstr.println("svc_agent_run");
-
-      connectToImportModule();
-
-      // Set "fail fast" mode on process memory reads
-      printlnToOutput("peek_fail_fast 1");
-    }
-    catch (IOException e) {
-      detach();
-      throw new DebuggerException("Error while connecting to dbx process", e);
-    }
-  }
-
-  /** From the Debugger interface via JVMDebugger */
-  public synchronized void attach(String executableName, String coreFileName) throws DebuggerException {
-    try {
-      launchProcess();
-      // Missing executable
-      dbxErrStreamMonitor.addTrigger("dbx: Cannot open", 1);
-      // Missing core file
-      dbxErrStreamMonitor.addTrigger("dbx: can't read", 2);
-      // Corrupt executable
-      dbxErrStreamMonitor.addTrigger("dbx: File", 3);
-      // Corrupt core file
-      dbxErrStreamMonitor.addTrigger("dbx: Unable to read", 4);
-      // Mismatched core and executable
-      dbxErrStreamMonitor.addTrigger("dbx: core object name", 5);
-      // Missing loadobject
-      dbxErrStreamMonitor.addTrigger("dbx: can't stat", 6);
-      // Successful load of svc module
-      dbxOstr = new PrintWriter(dbxProcess.getOutputStream(), true);
-      dbxOstr.println("debug " + executableName + " " + coreFileName);
-      dbxOstr.println("kprint -u2 \\(ready\\)");
-      boolean seen = dbxErrStreamMonitor.waitFor("(ready)", LONG_TIMEOUT);
-      if (!seen) {
-        detach();
-        throw new DebuggerException("Timed out while attaching to core file");
-      }
-      List retVals = dbxErrStreamMonitor.getTriggersSeen();
-      if (retVals.size() > 0) {
-        detach();
-
-        if (retVals.contains(new Integer(1))) {
-          throw new DebuggerException("Can not find executable \"" + executableName + "\"");
-        } else if (retVals.contains(new Integer(2))) {
-          throw new DebuggerException("Can not find core file \"" + coreFileName + "\"");
-        } else if (retVals.contains(new Integer(3))) {
-          throw new DebuggerException("Corrupt executable \"" + executableName + "\"");
-        } else if (retVals.contains(new Integer(4))) {
-          throw new DebuggerException("Corrupt core file \"" + coreFileName + "\"");
-        } else if (retVals.contains(new Integer(5))) {
-          throw new DebuggerException("Mismatched core file/executable \"" + coreFileName + "\"/\"" + executableName + "\"");
-        } else {
-          throw new DebuggerException("Couldn't find all loaded libraries for executable \"" + executableName + "\"");
-        }
-      }
-
-      // Throws DebuggerException upon failure
-      importDbxModule();
-
-      dbxOstr.println("svc_agent_run");
-
-      connectToImportModule();
-
-      // Set "fail fast" mode on process memory reads
-      printlnToOutput("peek_fail_fast 1");
-    }
-    catch (IOException e) {
-      detach();
-      throw new DebuggerException("Error while connecting to dbx process", e);
-    }
-  }
-
-  /** From the Debugger interface via JVMDebugger */
-  public synchronized boolean detach() {
-    try {
-      if (dbxProcess == null) {
-        return false;
-      }
-
-      if (out != null && dbxOstr != null) {
-        printlnToOutput("exit");
-        dbxOstr.println("exit");
-
-        // Wait briefly for the process to exit (FIXME: should make this
-        // nicer)
-        try {
-          Thread.sleep(500);
-        }
-        catch (InterruptedException e) {
-        }
-      }
-
-      shutdown();
-
-      return true;
-    } catch (IOException e) {
-      e.printStackTrace();
-      return false;
-    }
-  }
-
-  /** From the Debugger interface via JVMDebugger */
-  public Address parseAddress(String addressString) throws NumberFormatException {
-    long addr = utils.scanAddress(addressString);
-    if (addr == 0) {
-      return null;
-    }
-    return new DbxAddress(this, addr);
-  }
-
-  /** From the Debugger interface via JVMDebugger */
-  public String getOS() {
-    return PlatformInfo.getOS();
-  }
-
-  /** From the Debugger interface via JVMDebugger */
-  public String getCPU() {
-    return PlatformInfo.getCPU();
-  }
-
-  public boolean hasConsole() throws DebuggerException {
-    return true;
-  }
-
-  public synchronized String consoleExecuteCommand(String cmd) throws DebuggerException {
-    try {
-      // A little tricky. We need to cause the dbx import module to
-      // exit, then print our command on dbx's stdin along with a
-      // command which will allow our StreamMonitors to
-      // resynchronize. We need to save the output from the StreamMonitors
-      // along the way.
-      printlnToOutput("exit");
-      importModuleSocket.close();
-      importModuleSocket = null;
-      out = null;
-      in = null;
-      dbxOstr.println("kprint \\(ready\\)");
-      dbxOstr.flush();
-      dbxOutStreamMonitor.waitFor("(ready)", LONG_TIMEOUT);
-
-      dbxOutStreamMonitor.startCapture();
-      dbxErrStreamMonitor.startCapture();
-      dbxOstr.println(cmd);
-      dbxOstr.println("kprint \\(ready\\)");
-      dbxOutStreamMonitor.waitFor("(ready)", LONG_TIMEOUT);
-      String result = dbxOutStreamMonitor.stopCapture();
-      String result2 = dbxErrStreamMonitor.stopCapture();
-      result = result + result2;
-      // Cut out the "(ready)" string
-      StringBuffer outBuf = new StringBuffer(result.length());
-      BufferedReader reader = new BufferedReader(new StringReader(result));
-      // FIXME: bug in BufferedReader? readLine returns null when
-      // ready() returns true.
-      String line = null;
-      do {
-        line = reader.readLine();
-        if ((line != null) && (!line.equals("(ready)"))) {
-          outBuf.append(line);
-          outBuf.append("\n");
-        }
-      } while (line != null);
-      dbxOstr.println("svc_agent_run");
-      dbxOstr.flush();
-
-      connectToImportModule();
-
-      return outBuf.toString();
-    }
-    catch (IOException e) {
-      detach();
-      throw new DebuggerException("Error while executing command on dbx console", e);
-    }
-  }
-
-  public String getConsolePrompt() throws DebuggerException {
-    return "(dbx) ";
-  }
-
-  public CDebugger getCDebugger() throws DebuggerException {
-    return null;
-  }
-
-  /** From the SymbolLookup interface via Debugger and JVMDebugger */
-  public synchronized Address lookup(String objectName, String symbol) {
-    long addr = lookupInProcess(objectName, symbol);
-    if (addr == 0) {
-      return null;
-    }
-    return new DbxAddress(this, addr);
-  }
-
-  /** From the SymbolLookup interface via Debugger and JVMDebugger */
-  public synchronized OopHandle lookupOop(String objectName, String symbol) {
-    long addr = lookupInProcess(objectName, symbol);
-    if (addr == 0) {
-      return null;
-    }
-    return new DbxOopHandle(this, addr);
-  }
-
-  /** From the Debugger interface */
-  public MachineDescription getMachineDescription() {
-    return machDesc;
-  }
-
-  /** Internal routine supporting lazy setting of MachineDescription,
-      since on SPARC we will need to query the remote process to ask
-      it what its data model is (32- or 64-bit). NOTE that this is NOT
-      present in the DbxDebugger interface because it should not be
-      called across the wire (until we support attaching to multiple
-      remote processes via RMI -- see the documentation for
-      DbxDebuggerRemoteIntf.) */
-  public void setMachineDescription(MachineDescription machDesc) {
-    this.machDesc = machDesc;
-    setBigEndian(machDesc.isBigEndian());
-    utils = new DebuggerUtilities(machDesc.getAddressSize(), machDesc.isBigEndian());
-  }
-
-  /** Internal routine which queries the remote process about its data
-      model -- i.e., size of addresses. Returns -1 upon error.
-      Currently supported return values are 32 and 64. NOTE that this
-      is NOT present in the DbxDebugger interface because it should
-      not be called across the wire (until we support attaching to
-      multiple remote processes via RMI -- see the documentation for
-      DbxDebuggerRemoteIntf.) */
-  public int getRemoteProcessAddressSize() {
-    if (dbxProcess == null) {
-      throw new RuntimeException("Not attached to remote process");
-    }
-
-    try {
-      printlnToOutput("address_size");
-      int i = in.parseInt();
-      return i;
-    }
-    catch (IOException e) {
-      return -1;
-    }
-  }
-
-  //--------------------------------------------------------------------------------
-  // Implementation of ThreadAccess interface
-  //
-
-  /** From the ThreadAccess interface via Debugger and JVMDebugger */
-  public ThreadProxy getThreadForIdentifierAddress(Address addr) {
-    return threadFactory.createThreadWrapper(addr);
-  }
-
-  public ThreadProxy getThreadForThreadId(long id) {
-    return threadFactory.createThreadWrapper(id);
-  }
-
-  //----------------------------------------------------------------------
-  // Overridden from DebuggerBase because we need to relax alignment
-  // constraints on x86
-
-  public long readJLong(long address)
-    throws UnmappedAddressException, UnalignedAddressException {
-    checkJavaConfigured();
-    // FIXME: allow this to be configurable. Undesirable to add a
-    // dependency on the runtime package here, though, since this
-    // package should be strictly underneath it.
-    if (unalignedAccessesOkay) {
-      utils.checkAlignment(address, jintSize);
-    } else {
-      utils.checkAlignment(address, jlongSize);
-    }
-    byte[] data = readBytes(address, jlongSize);
-    return utils.dataToJLong(data, jlongSize);
-  }
-
-  //--------------------------------------------------------------------------------
-  // Internal routines (for implementation of DbxAddress).
-  // These must not be called until the MachineDescription has been set up.
-  //
-
-  /** From the DbxDebugger interface */
-  public String addressValueToString(long address) {
-    return utils.addressValueToString(address);
-  }
-
-  /** Need to override this to relax alignment checks on Solaris/x86. */
-  public long readCInteger(long address, long numBytes, boolean isUnsigned)
-    throws UnmappedAddressException, UnalignedAddressException {
-    checkConfigured();
-    if (!unalignedAccessesOkay) {
-      utils.checkAlignment(address, numBytes);
-    } else {
-      // Only slightly relaxed semantics -- this is a hack, but is
-      // necessary on Solaris/x86 where it seems the compiler is
-      // putting some global 64-bit data on 32-bit boundaries
-      if (numBytes == 8) {
-        utils.checkAlignment(address, 4);
-      } else {
-        utils.checkAlignment(address, numBytes);
-      }
-    }
-    byte[] data = readBytes(address, numBytes);
-    return utils.dataToCInteger(data, isUnsigned);
-  }
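The readJLong and readCInteger overrides above relax the usual alignment rule on Solaris/x86, where the compiler may place 64-bit globals on 32-bit boundaries: an 8-byte access is allowed if the address is at least 4-byte aligned. Below is a minimal sketch of that check in isolation; the class and method names are illustrative, and the real DebuggerUtilities.checkAlignment throws UnalignedAddressException rather than returning a boolean.

    public class AlignmentCheckSketch {
      // True when the address is a multiple of the access size.
      static boolean isAligned(long address, long accessSize) {
        return (address % accessSize) == 0;
      }

      // Mirrors the relaxed rule above: with unaligned accesses allowed,
      // an 8-byte read only needs 4-byte alignment; anything else must be
      // aligned to its own size.
      static boolean readAllowed(long address, long numBytes, boolean unalignedAccessesOkay) {
        if (unalignedAccessesOkay && numBytes == 8) {
          return isAligned(address, 4);
        }
        return isAligned(address, numBytes);
      }

      public static void main(String[] args) {
        System.out.println(readAllowed(0x1004L, 8, true));   // true  (relaxed x86 rule)
        System.out.println(readAllowed(0x1004L, 8, false));  // false (strict rule)
      }
    }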
-
-  /** From the DbxDebugger interface */
-  public DbxAddress readAddress(long address)
-    throws UnmappedAddressException, UnalignedAddressException {
-    long value = readAddressValue(address);
-    return (value == 0 ? null : new DbxAddress(this, value));
-  }
-
-  public DbxAddress readCompOopAddress(long address)
-    throws UnmappedAddressException, UnalignedAddressException {
-    long value = readCompOopAddressValue(address);
-    return (value == 0 ? null : new DbxAddress(this, value));
-  }
-
-  /** From the DbxDebugger interface */
-  public DbxOopHandle readOopHandle(long address)
-    throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
-    long value = readAddressValue(address);
-    return (value == 0 ? null : new DbxOopHandle(this, value));
-  }
-  public DbxOopHandle readCompOopHandle(long address)
-    throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
-    long value = readCompOopAddressValue(address);
-    return (value == 0 ? null : new DbxOopHandle(this, value));
-  }
-
-  //--------------------------------------------------------------------------------
-  // Thread context access. Cannot be package private, but should
-  // only be accessed by the architecture-specific subpackages.
-
-  /** From the DbxDebugger interface. May have to redefine this later. */
-  public synchronized long[] getThreadIntegerRegisterSet(int tid) {
-    try {
-      printlnToOutput("thr_gregs " + tid);
-      int num = in.parseInt();
-      long[] res = new long[num];
-      for (int i = 0; i < num; i++) {
-        res[i] = in.parseAddress();
-      }
-      return res;
-    }
-    catch (Exception e) {
-      e.printStackTrace();
-      return null;
-    }
-  }
-
-  //--------------------------------------------------------------------------------
-  // Address access. Cannot be package private, but should only be
-  // accessed by the architecture-specific subpackages.
-
-  /** From the Debugger interface */
-  public long getAddressValue(Address addr) {
-    if (addr == null) return 0;
-    return ((DbxAddress) addr).getValue();
-  }
-
-  /** From the DbxDebugger interface */
-  public Address newAddress(long value) {
-    if (value == 0) return null;
-    return new DbxAddress(this, value);
-  }
-
-  //--------------------------------------------------------------------------------
-  // Internals only below this point
-  //
-
-  private void launchProcess() throws IOException {
-    dbxProcess = Runtime.getRuntime().exec(dbxPathName);
-    //      dbxOutStreamMonitor = new StreamMonitor(dbxProcess.getInputStream());
-    //      dbxErrStreamMonitor = new StreamMonitor(dbxProcess.getErrorStream());
-    dbxOutStreamMonitor = new StreamMonitor(dbxProcess.getInputStream(), "dbx stdout", true);
-    dbxErrStreamMonitor = new StreamMonitor(dbxProcess.getErrorStream(), "dbx stderr", true);
-  }
-
-  /** Requires that dbxErrStreamMonitor has a trigger on "dbx: Cannot
-      find" with number DBX_MODULE_NOT_FOUND as well as one on "dbx:
-      warning:" (plus the serviceability agent's dbx module path name,
-      to avoid conflation with inability to load individual object
-      files) with number DBX_MODULE_FAILED_TO_LOAD. The former
-      indicates an absence of libsvc_agent_dbx.so, while the latter
-      indicates that the module failed to load, specifically because
-      the architecture was mismatched. (I don't see a way to detect
-      from the dbx command prompt whether it's running the v8 or v9
-      executable, so we try to import both flavors of the import
-      module; the "v8" file name convention doesn't actually include
-      the v8 prefix, so this code should work for Intel as well.) */
-  private void importDbxModule() throws DebuggerException {
-    // Trigger for a successful load
-    dbxOutStreamMonitor.addTrigger("Defining svc_agent_run", DBX_MODULE_LOADED);
-    for (int i = 0; i < dbxSvcAgentDSOPathNames.length; i++) {
-      dbxOstr.println("import " + dbxSvcAgentDSOPathNames[i]);
-      dbxOstr.println("kprint -u2 \\(Ready\\)");
-      boolean seen = dbxErrStreamMonitor.waitFor("(Ready)", LONG_TIMEOUT);
-      if (!seen) {
-        detach();
-        throw new DebuggerException("Timed out while importing dbx module from file\n" + dbxSvcAgentDSOPathNames[i]);
-      }
-      List retVals = dbxErrStreamMonitor.getTriggersSeen();
-      if (retVals.contains(new Integer(DBX_MODULE_NOT_FOUND))) {
-        detach();
-        throw new DebuggerException("Unable to find the Serviceability Agent's dbx import module at pathname \"" +
-                                    dbxSvcAgentDSOPathNames[i] + "\"");
-      } else {
-        retVals = dbxOutStreamMonitor.getTriggersSeen();
-        if (retVals.contains(new Integer(DBX_MODULE_LOADED))) {
-          System.out.println("importDbxModule: imported " +  dbxSvcAgentDSOPathNames[i]);
-          return;
-        }
-      }
-    }
-
-    // Failed to load all flavors
-    detach();
-    String errMsg = ("Unable to find a version of the Serviceability Agent's dbx import module\n" +
-                     "matching the architecture of dbx at any of the following locations:");
-    for (int i = 0; i < dbxSvcAgentDSOPathNames.length; i++) {
-      errMsg = errMsg + "\n" + dbxSvcAgentDSOPathNames[i];
-    }
-    throw new DebuggerException(errMsg);
-  }
-
-  /** Terminate the debugger forcibly */
-  private void shutdown() {
-
-    if (dbxProcess != null) {
-      // See whether the process has exited and, if not, terminate it
-      // forcibly
-      try {
-        dbxProcess.exitValue();
-      }
-      catch (IllegalThreadStateException e) {
-        dbxProcess.destroy();
-      }
-    }
-
-    try {
-      if (importModuleSocket != null) {
-        importModuleSocket.close();
-      }
-    }
-    catch (IOException e) {
-    }
-
-    // Release references to all objects
-    clear();
-    clearCache();
-  }
-
-  /** Looks up an address in the remote process's address space.
-      Returns 0 if symbol not found or upon error. Package private to
-      allow DbxDebuggerRemoteIntfImpl access. */
-  synchronized long lookupInProcess(String objectName, String symbol) {
-    try {
-      printlnToOutput("lookup " + objectName + " " + symbol);
-      return in.parseAddress();
-    }
-    catch (Exception e) {
-      return 0;
-    }
-  }
-
-  /** This reads bytes from the remote process. */
-  public synchronized ReadResult readBytesFromProcess(long address, long numBytes)
-    throws DebuggerException {
-    if (numBytes < 0) {
-      throw new DebuggerException("Can not read negative number (" + numBytes + ") of bytes from process");
-    }
-    try {
-      String cmd = "peek " + utils.addressValueToString(address) + " " + numBytes;
-      printlnToOutput(cmd);
-      while (in.readByte() != 'B') {
-      }
-      byte res = in.readByte();
-      if (res == 0) {
-        System.err.println("Failing command: " + cmd);
-        throw new DebuggerException("Read of remote process address space failed");
-      }
-      // NOTE: must read ALL of the data regardless of whether we need
-      // to throw an UnmappedAddressException. Otherwise will corrupt
-      // the input stream each time we have a failure. Not good. Do
-      // not want to risk "flushing" the input stream in case a huge
-      // read has a hangup in the middle and we leave data on the
-      // stream.
-      byte[] buf = new byte[(int) numBytes];
-      boolean bailOut = false;
-      long failureAddress = 0;
-      int numReads = 0;
-      while (numBytes > 0) {
-        long len = in.readUnsignedInt();
-        boolean isMapped = (in.readByte() != 0);
-        if (!isMapped) {
-          if (!bailOut) {
-            bailOut = true;
-            failureAddress = address;
-          }
-        } else {
-          // This won't work if we have unmapped regions, but if we do
-          // then we're going to throw an exception anyway
-
-          // NOTE: there is a factor of 20 speed difference between
-          // these two ways of doing this read.
-          in.readBytes(buf, 0, (int) len);
-        }
-
-        // Do NOT do this:
-        //        for (int i = 0; i < (int) len; i++) {
-        //          buf[i] = in.readByte();
-        //        }
-
-        numBytes -= len;
-        address += len;
-        ++numReads;
-      }
-      if (Assert.ASSERTS_ENABLED) {
-        Assert.that(numBytes == 0, "Bug in debug server's implementation of peek: numBytesLeft == " +
-                    numBytes + ", should be 0 (did " + numReads + " reads)");
-      }
-      if (bailOut) {
-        return new ReadResult(failureAddress);
-      }
-      return new ReadResult(buf);
-    }
-    catch (IOException e) {
-      throw new DebuggerException(e);
-    }
-  }
-
-  public void writeBytesToProcess(long address, long numBytes, byte[] data)
-    throws UnmappedAddressException, DebuggerException {
-    // FIXME
-    throw new DebuggerException("Unimplemented");
-  }
-
-  /** This provides DbxDebuggerRemoteIntfImpl access to readBytesFromProcess */
-  ReadResult readBytesFromProcessInternal(long address, long numBytes)
-    throws DebuggerException {
-    return readBytesFromProcess(address, numBytes);
-  }
-
-  /** Convenience routine */
-  private void printlnToOutput(String s) throws IOException {
-    out.println(s);
-    if (out.checkError()) {
-      throw new IOException("Error occurred while writing to debug server");
-    }
-  }
-
-  private void clear() {
-    dbxProcess = null;
-    dbxOstr = null;
-    out = null;
-    in = null;
-    importModuleSocket = null;
-  }
-
-  /** Connects to the dbx import module, setting up out and in
-      streams. Factored out to allow access to the dbx console. */
-  private void connectToImportModule() throws IOException {
-    // Try for up to LONG_TIMEOUT milliseconds to connect to the dbx import
-    // module; fail with a timeout exception if the connection never succeeds
-    importModuleSocket = null;
-    long endTime = System.currentTimeMillis() + LONG_TIMEOUT;
-
-    while ((importModuleSocket == null) && (System.currentTimeMillis() < endTime)) {
-      try {
-        importModuleSocket = new Socket(InetAddress.getLocalHost(), PORT);
-        importModuleSocket.setTcpNoDelay(true);
-      }
-      catch (IOException e) {
-        // Swallow IO exceptions while attempting connection
-        try {
-          // Don't swamp the CPU
-          Thread.sleep(1000);
-        }
-        catch (InterruptedException ex) {
-        }
-      }
-    }
-
-    if (importModuleSocket == null) {
-      // Failed to connect because of timeout
-      detach();
-      throw new DebuggerException("Timed out while attempting to connect to remote dbx process");
-    }
-
-    out = new PrintWriter(new BufferedWriter(new OutputStreamWriter(importModuleSocket.getOutputStream(), "US-ASCII")), true);
-    in = new InputLexer(new BufferedInputStream(importModuleSocket.getInputStream()));
-  }
-}
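connectToImportModule() in the file removed above uses a connect-with-deadline pattern: poll for the import module's listening socket, sleeping a second between attempts, until LONG_TIMEOUT expires. A self-contained sketch of that pattern follows; the class name and the way the port and timeout are passed in are illustrative, not taken verbatim from the removed code.

    import java.io.IOException;
    import java.net.InetAddress;
    import java.net.Socket;

    public class RetryConnectSketch {
      // Poll for a listening socket on the local host until the deadline
      // expires, sleeping between attempts so the loop does not spin while
      // the server side is still starting up.
      static Socket connectWithDeadline(int port, long timeoutMillis) throws IOException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (System.currentTimeMillis() < deadline) {
          try {
            Socket s = new Socket(InetAddress.getLocalHost(), port);
            s.setTcpNoDelay(true);  // request/response messages are small
            return s;
          } catch (IOException e) {
            try {
              Thread.sleep(1000);   // swallow and retry, as the removed code does
            } catch (InterruptedException ie) {
              Thread.currentThread().interrupt();
            }
          }
        }
        throw new IOException("Timed out connecting to port " + port);
      }
    }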
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/DbxOopHandle.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.debugger.dbx;
-
-import sun.jvm.hotspot.debugger.*;
-
-class DbxOopHandle extends DbxAddress implements OopHandle {
-  DbxOopHandle(DbxDebugger debugger, long addr) {
-    super(debugger, addr);
-  }
-
-  public Address    addOffsetTo       (long offset) throws UnsupportedOperationException {
-    throw new UnsupportedOperationException("addOffsetTo not applicable to OopHandles (interior object pointers not allowed)");
-  }
-
-  public Address    andWithMask(long mask) throws UnsupportedOperationException {
-    throw new UnsupportedOperationException("andWithMask not applicable to OopHandles (i.e., anything but C addresses)");
-  }
-
-  public Address    orWithMask(long mask) throws UnsupportedOperationException {
-    throw new UnsupportedOperationException("orWithMask not applicable to OopHandles (i.e., anything but C addresses)");
-  }
-
-  public Address    xorWithMask(long mask) throws UnsupportedOperationException {
-    throw new UnsupportedOperationException("xorWithMask not applicable to OopHandles (i.e., anything but C addresses)");
-  }
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/DbxThreadFactory.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2000, 2002, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.debugger.dbx;
-
-import sun.jvm.hotspot.debugger.*;
-
-/** An interface used only internally by the DbxDebugger to be able to
-    create platform-specific Thread objects */
-
-public interface DbxThreadFactory {
-  public ThreadProxy createThreadWrapper(Address threadIdentifierAddr);
-  public ThreadProxy createThreadWrapper(long id);
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/sparc/DbxSPARCThread.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2000, 2002, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.debugger.dbx.sparc;
-
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.sparc.*;
-import sun.jvm.hotspot.debugger.dbx.*;
-import sun.jvm.hotspot.utilities.*;
-
-public class DbxSPARCThread implements ThreadProxy {
-  private DbxDebugger debugger;
-  private int         id;
-
-  public DbxSPARCThread(DbxDebugger debugger, Address addr) {
-    this.debugger = debugger;
-
-    // FIXME: the size here should be configurable. However, making it
-    // so would produce a dependency on the "types" package from the
-    // debugger package, which is not desired.
-    this.id       = (int) addr.getCIntegerAt(0, 4, true);
-  }
-
-  public DbxSPARCThread(DbxDebugger debugger, long id) {
-    this.debugger = debugger;
-    this.id = (int) id;
-  }
-
-  public boolean equals(Object obj) {
-    if ((obj == null) || !(obj instanceof DbxSPARCThread)) {
-      return false;
-    }
-
-    return (((DbxSPARCThread) obj).id == id);
-  }
-
-  public int hashCode() {
-    return id;
-  }
-
-  public ThreadContext getContext() throws IllegalThreadStateException {
-    DbxSPARCThreadContext context = new DbxSPARCThreadContext(debugger);
-    long[] regs = debugger.getThreadIntegerRegisterSet(id);
-    if (Assert.ASSERTS_ENABLED) {
-      Assert.that(regs.length == SPARCThreadContext.NPRGREG, "size of register set must match");
-    }
-    for (int i = 0; i < regs.length; i++) {
-      context.setRegister(i, regs[i]);
-    }
-    return context;
-  }
-
-  public boolean canSetContext() throws DebuggerException {
-    return false;
-  }
-
-  public void setContext(ThreadContext context)
-    throws IllegalThreadStateException, DebuggerException {
-    throw new DebuggerException("Unimplemented");
-  }
-
-  public String toString() {
-    return "t@" + id;
-  }
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/sparc/DbxSPARCThreadContext.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2000, 2001, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.debugger.dbx.sparc;
-
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.sparc.*;
-import sun.jvm.hotspot.debugger.dbx.*;
-
-public class DbxSPARCThreadContext extends SPARCThreadContext {
-  private DbxDebugger debugger;
-
-  public DbxSPARCThreadContext(DbxDebugger debugger) {
-    super();
-    this.debugger = debugger;
-  }
-
-  public void setRegisterAsAddress(int index, Address value) {
-    setRegister(index, debugger.getAddressValue(value));
-  }
-
-  public Address getRegisterAsAddress(int index) {
-    return debugger.newAddress(getRegister(index));
-  }
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/sparc/DbxSPARCThreadFactory.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2000, 2002, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.debugger.dbx.sparc;
-
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.dbx.*;
-
-public class DbxSPARCThreadFactory implements DbxThreadFactory {
-  private DbxDebugger debugger;
-
-  public DbxSPARCThreadFactory(DbxDebugger debugger) {
-    this.debugger = debugger;
-  }
-
-  public ThreadProxy createThreadWrapper(Address threadIdentifierAddr) {
-    return new DbxSPARCThread(debugger, threadIdentifierAddr);
-  }
-
-  public ThreadProxy createThreadWrapper(long id) {
-    return new DbxSPARCThread(debugger, id);
-  }
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/x86/DbxX86Thread.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2000, 2002, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.debugger.dbx.x86;
-
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.x86.*;
-import sun.jvm.hotspot.debugger.dbx.*;
-import sun.jvm.hotspot.utilities.*;
-
-public class DbxX86Thread implements ThreadProxy {
-  private DbxDebugger debugger;
-  private int         id;
-
-  public DbxX86Thread(DbxDebugger debugger, Address addr) {
-    this.debugger = debugger;
-
-    // FIXME: the size here should be configurable. However, making it
-    // so would produce a dependency on the "types" package from the
-    // debugger package, which is not desired.
-    this.id       = (int) addr.getCIntegerAt(0, 4, true);
-  }
-
-  public DbxX86Thread(DbxDebugger debugger, long id) {
-    this.debugger = debugger;
-    this.id  = (int) id;
-  }
-
-  public boolean equals(Object obj) {
-    if ((obj == null) || !(obj instanceof DbxX86Thread)) {
-      return false;
-    }
-
-    return (((DbxX86Thread) obj).id == id);
-  }
-
-  public int hashCode() {
-    return id;
-  }
-
-  public ThreadContext getContext() throws IllegalThreadStateException {
-    DbxX86ThreadContext context = new DbxX86ThreadContext(debugger);
-    long[] regs = debugger.getThreadIntegerRegisterSet(id);
-    if (Assert.ASSERTS_ENABLED) {
-      Assert.that(regs.length == 19, "unknown size of register set -- adjust this code");
-    }
-    for (int i = 0; i < regs.length; i++) {
-      context.setRegister(i, regs[i]);
-    }
-    return context;
-  }
-
-  public boolean canSetContext() throws DebuggerException {
-    return false;
-  }
-
-  public void setContext(ThreadContext context)
-    throws IllegalThreadStateException, DebuggerException {
-    throw new DebuggerException("Unimplemented");
-  }
-
-  public String toString() {
-    return "t@" + id;
-  }
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/x86/DbxX86ThreadContext.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2000, 2001, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.debugger.dbx.x86;
-
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.x86.*;
-import sun.jvm.hotspot.debugger.dbx.*;
-
-public class DbxX86ThreadContext extends X86ThreadContext {
-  private DbxDebugger debugger;
-
-  public DbxX86ThreadContext(DbxDebugger debugger) {
-    super();
-    this.debugger = debugger;
-  }
-
-  public void setRegisterAsAddress(int index, Address value) {
-    setRegister(index, debugger.getAddressValue(value));
-  }
-
-  public Address getRegisterAsAddress(int index) {
-    return debugger.newAddress(getRegister(index));
-  }
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/x86/DbxX86ThreadFactory.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2000, 2002, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.debugger.dbx.x86;
-
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.dbx.*;
-
-public class DbxX86ThreadFactory implements DbxThreadFactory {
-  private DbxDebugger debugger;
-
-  public DbxX86ThreadFactory(DbxDebugger debugger) {
-    this.debugger = debugger;
-  }
-
-  public ThreadProxy createThreadWrapper(Address threadIdentifierAddr) {
-    return new DbxX86Thread(debugger, threadIdentifierAddr);
-  }
-
-  public ThreadProxy createThreadWrapper(long id) {
-    return new DbxX86Thread(debugger, id);
-  }
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/AddressDataSource.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,99 +0,0 @@
-/*
- * Copyright (c) 2000, 2004, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.debugger.win32;
-
-import java.io.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.win32.coff.*;
-
-class AddressDataSource implements DataSource {
-  AddressDataSource(Address addr) {
-    this.addr = addr;
-    offset = 0;
-  }
-
-  public byte readByte() throws IOException {
-    try {
-      byte res = (byte) addr.getCIntegerAt(offset, 1, false);
-      ++offset;
-      return res;
-    } catch (UnmappedAddressException e) {
-      throw (IOException) new IOException("Unmapped address at 0x" + Long.toHexString(e.getAddress())).initCause(e);
-    } catch (DebuggerException e) {
-      throw (IOException) new IOException(e.toString()).initCause(e);
-    }
-  }
-
-  public short readShort() throws IOException {
-    // NOTE: byte swapping is taken care of at the COFFFileImpl level
-    int b1 = readByte() & 0xFF;
-    int b2 = readByte() & 0xFF;
-    return (short) ((b1 << 8) | b2);
-  }
-
-  public int readInt() throws IOException {
-    // NOTE: byte swapping is taken care of at the COFFFileImpl level
-    int b1 = ((int) readByte()) & 0xFF;
-    int b2 = ((int) readByte()) & 0xFF;
-    int b3 = ((int) readByte()) & 0xFF;
-    int b4 = ((int) readByte()) & 0xFF;
-    return ((b1 << 24) | (b2 << 16) | (b3 << 8) | b4);
-  }
-
-  public long readLong() throws IOException {
-    // NOTE: byte swapping is taken care of at the COFFFileImpl level
-    long b1 = ((long) readByte()) & 0xFFL;
-    long b2 = ((long) readByte()) & 0xFFL;
-    long b3 = ((long) readByte()) & 0xFFL;
-    long b4 = ((long) readByte()) & 0xFFL;
-    long b5 = ((long) readByte()) & 0xFFL;
-    long b6 = ((long) readByte()) & 0xFFL;
-    long b7 = ((long) readByte()) & 0xFFL;
-    long b8 = ((long) readByte()) & 0xFFL;
-    return (((((b1 << 24) | (b2 << 16) | (b3 << 8) | b4)) << 32) |
-            ((((b5 << 24) | (b6 << 16) | (b7 << 8) | b8))));
-  }
-
-  public int read(byte[] b) throws IOException {
-    for (int i = 0; i < b.length; i++) {
-      b[i] = readByte();
-    }
-    return b.length;
-  }
-
-  public void seek(long pos) throws IOException {
-    offset = pos;
-  }
-
-  public long getFilePointer() throws IOException {
-    return offset;
-  }
-
-  public void close() throws IOException {
-  }
-
-  private Address addr;
-  private long offset;
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/DLL.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,209 +0,0 @@
-/*
- * Copyright (c) 2000, 2003, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.debugger.win32;
-
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.win32.coff.*;
-import sun.jvm.hotspot.debugger.cdbg.*;
-import sun.jvm.hotspot.utilities.Assert;
-import sun.jvm.hotspot.utilities.memo.*;
-
-/** Provides a simple wrapper around the COFF library which handles
-    relocation. A DLL can represent either a DLL or an EXE file. */
-
-class DLL implements LoadObject {
-
-  DLL(Win32Debugger dbg, String filename, long size, Address relocation) throws COFFException {
-    this.dbg     = dbg;
-    fullPathName = filename;
-    this.size    = size;
-    file = new MemoizedObject() {
-        public Object computeValue() {
-          return COFFFileParser.getParser().parse(fullPathName);
-        }
-      };
-    addr = relocation;
-  }
-
-  /** This constructor was originally used to fetch the DLL's name out
-      of the target process to match it up with the known DLL names,
-      before the fetching of the DLL names and bases was folded into
-      one command. It is no longer used. If it is used, getName() will
-      return null and getSize() will return 0. */
-  DLL(Address base) throws COFFException {
-    this.addr = base;
-    file = new MemoizedObject() {
-        public Object computeValue() {
-          return COFFFileParser.getParser().parse(new AddressDataSource(addr));
-        }
-      };
-  }
-
-  /** Indicates whether this is really a DLL or actually a .EXE
-      file. */
-  boolean isDLL() {
-    return getFile().getHeader().hasCharacteristic(Characteristics.IMAGE_FILE_DLL);
-  }
-
-  /** Look up a symbol; returns absolute address or null if symbol was
-      not found. */
-  Address lookupSymbol(String symbol) throws COFFException {
-    if (!isDLL()) {
-      return null;
-    }
-    ExportDirectoryTable exports = getExportDirectoryTable();
-    return lookupSymbol(symbol, exports,
-                        0, exports.getNumberOfNamePointers() - 1);
-  }
-
-  public Address getBase() {
-    return addr;
-  }
-
-  /** Returns the full path name of this DLL/EXE, or null if this DLL
-      object was created by parsing the target process's address
-      space. */
-  public String getName() {
-    return fullPathName;
-  }
-
-  public long getSize() {
-    return size;
-  }
-
-  public CDebugInfoDataBase getDebugInfoDataBase() throws DebuggerException {
-    if (db != null) {
-      return db;
-    }
-
-    // Try to parse
-    if (dbg == null) {
-      return null; // Need Win32Debugger
-    }
-
-    if (Assert.ASSERTS_ENABLED) {
-      Assert.that(fullPathName != null, "Need full path name to build debug info database");
-    }
-
-    db = new Win32CDebugInfoBuilder(dbg).buildDataBase(fullPathName, addr);
-    return db;
-  }
-
-  public BlockSym debugInfoForPC(Address pc) throws DebuggerException {
-    CDebugInfoDataBase db = getDebugInfoDataBase();
-    if (db == null) {
-      return null;
-    }
-    return db.debugInfoForPC(pc);
-  }
-
-  public ClosestSymbol closestSymbolToPC(Address pcAsAddr) throws DebuggerException {
-    ExportDirectoryTable exports = getExportDirectoryTable();
-    if (exports == null) {
-      return null;
-    }
-    String name = null;
-    long   pc   = dbg.getAddressValue(pcAsAddr);
-    long   diff = Long.MAX_VALUE;
-    long   base = dbg.getAddressValue(addr);
-    for (int i = 0; i < exports.getNumberOfNamePointers(); i++) {
-      if (!exports.isExportAddressForwarder(exports.getExportOrdinal(i))) {
-        long tmp = base + (exports.getExportAddress(exports.getExportOrdinal(i)) & 0xFFFFFFFF);
-        if ((tmp <= pc) && ((pc - tmp) < diff)) {
-          diff = pc - tmp;
-          name = exports.getExportName(i);
-        }
-      }
-    }
-    if (name == null) {
-      return null;
-    }
-    return new ClosestSymbol(name, diff);
-  }
-
-  public LineNumberInfo lineNumberForPC(Address pc) throws DebuggerException {
-    CDebugInfoDataBase db = getDebugInfoDataBase();
-    if (db == null) {
-      return null;
-    }
-    return db.lineNumberForPC(pc);
-  }
-
-  void close() {
-    getFile().close();
-    file = null;
-  }
-
-  //----------------------------------------------------------------------
-  // Internals only below this point
-  //
-
-  private COFFFile getFile() {
-    return (COFFFile) file.getValue();
-  }
-
-  private Address lookupSymbol(String symbol, ExportDirectoryTable exports,
-                               int loIdx, int hiIdx) {
-    do {
-      int curIdx = ((loIdx + hiIdx) >> 1);
-      String cur = exports.getExportName(curIdx);
-      if (symbol.equals(cur)) {
-        return addr.addOffsetTo(
-          ((long) exports.getExportAddress(exports.getExportOrdinal(curIdx))) & 0xFFFFFFFFL
-        );
-      }
-      if (symbol.compareTo(cur) < 0) {
-        if (hiIdx == curIdx) {
-          hiIdx = curIdx - 1;
-        } else {
-          hiIdx = curIdx;
-        }
-      } else {
-        if (loIdx == curIdx) {
-          loIdx = curIdx + 1;
-        } else {
-          loIdx = curIdx;
-        }
-      }
-    } while (loIdx <= hiIdx);
-
-    return null;
-  }
-
-  private ExportDirectoryTable getExportDirectoryTable() {
-    return
-      getFile().getHeader().getOptionalHeader().getDataDirectories().getExportDirectoryTable();
-  }
-
-  private Win32Debugger  dbg;
-  private String         fullPathName;
-  private long           size;
-  // MemoizedObject contains a COFFFile
-  private MemoizedObject file;
-  // Base address of module in target process
-  private Address        addr;
-  // Debug info database for this DLL
-  private CDebugInfoDataBase db;
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/TestDebugger.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.debugger.win32;
-
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-
-public class TestDebugger {
-  private static void usage() {
-    System.out.println("usage: java TestDebugger [pid]");
-    System.exit(1);
-  }
-
-  public static void main(String[] args) {
-    try {
-      if (args.length != 1) {
-        usage();
-      }
-
-      int pid = 0;
-      try {
-        pid = Integer.parseInt(args[0]);
-      }
-      catch (NumberFormatException e) {
-        usage();
-      }
-
-      JVMDebugger debugger = new Win32DebuggerLocal(new MachineDescriptionIntelX86(), true);
-      System.err.println("Process list: ");
-      List processes = debugger.getProcessList();
-      for (Iterator iter = processes.iterator(); iter.hasNext(); ) {
-        ProcessInfo info = (ProcessInfo) iter.next();
-        System.err.println(info.getPid() + " " + info.getName());
-      }
-      System.err.println("Trying to attach...");
-      debugger.attach(pid);
-      System.err.println("Attach succeeded.");
-      System.err.println("Trying to detach...");
-      if (!debugger.detach()) {
-        System.err.println("ERROR: detach failed.");
-        System.exit(0);
-      }
-      System.err.println("Detach succeeded.");
-    } catch (Exception e) {
-      e.printStackTrace();
-    }
-  }
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/TestHelloWorld.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.debugger.win32;
-
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-
-/** Tests to see whether we can find the "Hello, World" string in a
-    target process */
-
-public class TestHelloWorld {
-  private static void usage() {
-    System.out.println("usage: java TestHelloWorld [pid]");
-    System.out.println("pid must be the process ID of the HelloWorldDLL programs");
-    System.exit(1);
-  }
-
-  public static void main(String[] args) {
-    try {
-      if (args.length != 1) {
-        usage();
-      }
-
-      int pid = 0;
-      try {
-        pid = Integer.parseInt(args[0]);
-      }
-      catch (NumberFormatException e) {
-        usage();
-      }
-
-      JVMDebugger debugger = new Win32DebuggerLocal(new MachineDescriptionIntelX86(), true);
-      System.err.println("Trying to attach...");
-      debugger.attach(pid);
-      System.err.println("Attach succeeded.");
-      Address addr = debugger.lookup("helloworld.dll", "helloWorldString");
-      System.err.println("helloWorldString address = " + addr);
-      System.err.println("Trying to detach...");
-      if (!debugger.detach()) {
-        System.err.println("ERROR: detach failed.");
-        System.exit(0);
-      }
-      System.err.println("Detach succeeded.");
-    } catch (Exception e) {
-      e.printStackTrace();
-    }
-  }
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32Address.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,403 +0,0 @@
-/*
- * Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.debugger.win32;
-
-import sun.jvm.hotspot.debugger.*;
-
-class Win32Address implements Address {
-  protected Win32Debugger debugger;
-  protected long addr;
-
-  Win32Address(Win32Debugger debugger, long addr) {
-    this.debugger = debugger;
-    this.addr = addr;
-  }
-
-  //
-  // Basic Java routines
-  //
-
-  public boolean equals(Object arg) {
-    if (arg == null) {
-      return false;
-    }
-
-    if (!(arg instanceof Win32Address)) {
-      return false;
-    }
-
-    return (addr == ((Win32Address) arg).addr);
-  }
-
-  public int hashCode() {
-    // FIXME: suggestions on a better hash code?
-    return (int) addr;
-  }
-
-  public String toString() {
-    return debugger.addressValueToString(addr);
-  }
-
-  //
-  // C/C++-related routines
-  //
-
-  public long getCIntegerAt(long offset, long numBytes, boolean isUnsigned) throws UnalignedAddressException, UnmappedAddressException {
-    return debugger.readCInteger(addr + offset, numBytes, isUnsigned);
-  }
-
-  public Address getAddressAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
-    return debugger.readAddress(addr + offset);
-  }
-
-  public Address getCompOopAddressAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
-    return debugger.readCompOopAddress(addr + offset);
-  }
-
-  //
-  // Java-related routines
-  //
-
-  public boolean getJBooleanAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
-    return debugger.readJBoolean(addr + offset);
-  }
-
-  public byte getJByteAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
-    return debugger.readJByte(addr + offset);
-  }
-
-  public char getJCharAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
-    return debugger.readJChar(addr + offset);
-  }
-
-  public double getJDoubleAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
-    return debugger.readJDouble(addr + offset);
-  }
-
-  public float getJFloatAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
-    return debugger.readJFloat(addr + offset);
-  }
-
-  public int getJIntAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
-    return debugger.readJInt(addr + offset);
-  }
-
-  public long getJLongAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
-    return debugger.readJLong(addr + offset);
-  }
-
-  public short getJShortAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
-    return debugger.readJShort(addr + offset);
-  }
-
-  public OopHandle getOopHandleAt(long offset)
-    throws UnalignedAddressException, UnmappedAddressException, NotInHeapException {
-    return debugger.readOopHandle(addr + offset);
-  }
-  public OopHandle getCompOopHandleAt(long offset)
-    throws UnalignedAddressException, UnmappedAddressException, NotInHeapException {
-    return debugger.readCompOopHandle(addr + offset);
-  }
-
-  //
-  // C/C++-related mutators
-  //
-
-  public void setCIntegerAt(long offset, long numBytes, long value) {
-    debugger.writeCInteger(addr + offset, numBytes, value);
-  }
-  public void setAddressAt(long offset, Address value) {
-    debugger.writeAddress(addr + offset, (Win32Address) value);
-  }
-
-  //
-  // Java-related mutators
-  //
-
-  public void       setJBooleanAt      (long offset, boolean value)
-    throws UnmappedAddressException, UnalignedAddressException {
-    debugger.writeJBoolean(addr + offset, value);
-  }
-  public void       setJByteAt         (long offset, byte value)
-    throws UnmappedAddressException, UnalignedAddressException {
-    debugger.writeJByte(addr + offset, value);
-  }
-  public void       setJCharAt         (long offset, char value)
-    throws UnmappedAddressException, UnalignedAddressException {
-    debugger.writeJChar(addr + offset, value);
-  }
-  public void       setJDoubleAt       (long offset, double value)
-    throws UnmappedAddressException, UnalignedAddressException {
-    debugger.writeJDouble(addr + offset, value);
-  }
-  public void       setJFloatAt        (long offset, float value)
-    throws UnmappedAddressException, UnalignedAddressException {
-    debugger.writeJFloat(addr + offset, value);
-  }
-  public void       setJIntAt          (long offset, int value)
-    throws UnmappedAddressException, UnalignedAddressException {
-    debugger.writeJInt(addr + offset, value);
-  }
-  public void       setJLongAt         (long offset, long value)
-    throws UnmappedAddressException, UnalignedAddressException {
-    debugger.writeJLong(addr + offset, value);
-  }
-  public void       setJShortAt        (long offset, short value)
-    throws UnmappedAddressException, UnalignedAddressException {
-    debugger.writeJShort(addr + offset, value);
-  }
-  public void       setOopHandleAt     (long offset, OopHandle value)
-    throws UnmappedAddressException, UnalignedAddressException {
-    debugger.writeOopHandle(addr + offset, (Win32OopHandle) value);
-  }
-
-  //
-  // Arithmetic operations -- necessary evil.
-  //
-
-  public Address    addOffsetTo       (long offset) throws UnsupportedOperationException {
-    long value = addr + offset;
-    if (value == 0) {
-      return null;
-    }
-    return new Win32Address(debugger, value);
-  }
-
-  public OopHandle  addOffsetToAsOopHandle(long offset) throws UnsupportedOperationException {
-    long value = addr + offset;
-    if (value == 0) {
-      return null;
-    }
-    return new Win32OopHandle(debugger, value);
-  }
-
-  /** (FIXME: any signed/unsigned issues? Should this work for
-      OopHandles?) */
-  public long       minus(Address arg) {
-    if (arg == null) {
-      return addr;
-    }
-    return addr - ((Win32Address) arg).addr;
-  }
-
-  // Two's complement representation.
-  // All negative numbers are larger than positive numbers.
-  // Numbers with the same sign can be compared normally.
-  // Test harness is below in main().
-
-  public boolean    lessThan          (Address a) {
-    if (a == null) {
-      return false;
-    }
-    Win32Address arg = (Win32Address) a;
-    if ((addr >= 0) && (arg.addr < 0)) {
-      return true;
-    }
-    if ((addr < 0) && (arg.addr >= 0)) {
-      return false;
-    }
-    return (addr < arg.addr);
-  }
-
-  public boolean    lessThanOrEqual   (Address a) {
-    if (a == null) {
-      return false;
-    }
-    Win32Address arg = (Win32Address) a;
-    if ((addr >= 0) && (arg.addr < 0)) {
-      return true;
-    }
-    if ((addr < 0) && (arg.addr >= 0)) {
-      return false;
-    }
-    return (addr <= arg.addr);
-  }
-
-  public boolean    greaterThan       (Address a) {
-    if (a == null) {
-      return true;
-    }
-    Win32Address arg = (Win32Address) a;
-    if ((addr >= 0) && (arg.addr < 0)) {
-      return false;
-    }
-    if ((addr < 0) && (arg.addr >= 0)) {
-      return true;
-    }
-    return (addr > arg.addr);
-  }
-
-  public boolean    greaterThanOrEqual(Address a) {
-    if (a == null) {
-      return true;
-    }
-    Win32Address arg = (Win32Address) a;
-    if ((addr >= 0) && (arg.addr < 0)) {
-      return false;
-    }
-    if ((addr < 0) && (arg.addr >= 0)) {
-      return true;
-    }
-    return (addr >= arg.addr);
-  }
-
-  public Address    andWithMask(long mask) throws UnsupportedOperationException {
-    long value = addr & mask;
-    if (value == 0) {
-      return null;
-    }
-    return new Win32Address(debugger, value);
-  }
-
-  public Address    orWithMask(long mask) throws UnsupportedOperationException {
-    long value = addr | mask;
-    if (value == 0) {
-      return null;
-    }
-    return new Win32Address(debugger, value);
-  }
-
-  public Address    xorWithMask(long mask) throws UnsupportedOperationException {
-    long value = addr ^ mask;
-    if (value == 0) {
-      return null;
-    }
-    return new Win32Address(debugger, value);
-  }
-
-
-  //--------------------------------------------------------------------------------
-  // Internals only below this point
-  //
-
-  long getValue() {
-    return addr;
-  }
-
-
-  private static void check(boolean arg, String failMessage) {
-    if (!arg) {
-      System.err.println(failMessage + ": FAILED");
-      System.exit(1);
-    }
-  }
-
-  // Test harness
-  public static void main(String[] args) {
-    // p/n indicates whether the interior address is really positive
-    // or negative. In unsigned terms, p1 < p2 < n1 < n2.
-
-    Win32Address p1 = new Win32Address(null, 0x7FFFFFFFFFFFFFF0L);
-    Win32Address p2 = (Win32Address) p1.addOffsetTo(10);
-    Win32Address n1 = (Win32Address) p2.addOffsetTo(10);
-    Win32Address n2 = (Win32Address) n1.addOffsetTo(10);
-
-    // lessThan positive tests
-    check(p1.lessThan(p2), "lessThan 1");
-    check(p1.lessThan(n1), "lessThan 2");
-    check(p1.lessThan(n2), "lessThan 3");
-    check(p2.lessThan(n1), "lessThan 4");
-    check(p2.lessThan(n2), "lessThan 5");
-    check(n1.lessThan(n2), "lessThan 6");
-
-    // lessThan negative tests
-    check(!p1.lessThan(p1), "lessThan 7");
-    check(!p2.lessThan(p2), "lessThan 8");
-    check(!n1.lessThan(n1), "lessThan 9");
-    check(!n2.lessThan(n2), "lessThan 10");
-
-    check(!p2.lessThan(p1), "lessThan 11");
-    check(!n1.lessThan(p1), "lessThan 12");
-    check(!n2.lessThan(p1), "lessThan 13");
-    check(!n1.lessThan(p2), "lessThan 14");
-    check(!n2.lessThan(p2), "lessThan 15");
-    check(!n2.lessThan(n1), "lessThan 16");
-
-    // lessThanOrEqual positive tests
-    check(p1.lessThanOrEqual(p1), "lessThanOrEqual 1");
-    check(p2.lessThanOrEqual(p2), "lessThanOrEqual 2");
-    check(n1.lessThanOrEqual(n1), "lessThanOrEqual 3");
-    check(n2.lessThanOrEqual(n2), "lessThanOrEqual 4");
-
-    check(p1.lessThanOrEqual(p2), "lessThanOrEqual 5");
-    check(p1.lessThanOrEqual(n1), "lessThanOrEqual 6");
-    check(p1.lessThanOrEqual(n2), "lessThanOrEqual 7");
-    check(p2.lessThanOrEqual(n1), "lessThanOrEqual 8");
-    check(p2.lessThanOrEqual(n2), "lessThanOrEqual 9");
-    check(n1.lessThanOrEqual(n2), "lessThanOrEqual 10");
-
-    // lessThanOrEqual negative tests
-    check(!p2.lessThanOrEqual(p1), "lessThanOrEqual 11");
-    check(!n1.lessThanOrEqual(p1), "lessThanOrEqual 12");
-    check(!n2.lessThanOrEqual(p1), "lessThanOrEqual 13");
-    check(!n1.lessThanOrEqual(p2), "lessThanOrEqual 14");
-    check(!n2.lessThanOrEqual(p2), "lessThanOrEqual 15");
-    check(!n2.lessThanOrEqual(n1), "lessThanOrEqual 16");
-
-    // greaterThan positive tests
-    check(n2.greaterThan(p1), "greaterThan 1");
-    check(n2.greaterThan(p2), "greaterThan 2");
-    check(n2.greaterThan(n1), "greaterThan 3");
-    check(n1.greaterThan(p1), "greaterThan 4");
-    check(n1.greaterThan(p2), "greaterThan 5");
-    check(p2.greaterThan(p1), "greaterThan 6");
-
-    // greaterThan negative tests
-    check(!p1.greaterThan(p1), "greaterThan 7");
-    check(!p2.greaterThan(p2), "greaterThan 8");
-    check(!n1.greaterThan(n1), "greaterThan 9");
-    check(!n2.greaterThan(n2), "greaterThan 10");
-
-    check(!p1.greaterThan(n2), "greaterThan 11");
-    check(!p2.greaterThan(n2), "greaterThan 12");
-    check(!n1.greaterThan(n2), "greaterThan 13");
-    check(!p1.greaterThan(n1), "greaterThan 14");
-    check(!p2.greaterThan(n1), "greaterThan 15");
-    check(!p1.greaterThan(p2), "greaterThan 16");
-
-    // greaterThanOrEqual positive tests
-    check(p1.greaterThanOrEqual(p1), "greaterThanOrEqual 1");
-    check(p2.greaterThanOrEqual(p2), "greaterThanOrEqual 2");
-    check(n1.greaterThanOrEqual(n1), "greaterThanOrEqual 3");
-    check(n2.greaterThanOrEqual(n2), "greaterThanOrEqual 4");
-
-    check(n2.greaterThanOrEqual(p1), "greaterThanOrEqual 5");
-    check(n2.greaterThanOrEqual(p2), "greaterThanOrEqual 6");
-    check(n2.greaterThanOrEqual(n1), "greaterThanOrEqual 7");
-    check(n1.greaterThanOrEqual(p1), "greaterThanOrEqual 8");
-    check(n1.greaterThanOrEqual(p2), "greaterThanOrEqual 9");
-    check(p2.greaterThanOrEqual(p1), "greaterThanOrEqual 10");
-
-    // greaterThanOrEqual negative tests
-    check(!p1.greaterThanOrEqual(n2), "greaterThanOrEqual 11");
-    check(!p2.greaterThanOrEqual(n2), "greaterThanOrEqual 12");
-    check(!n1.greaterThanOrEqual(n2), "greaterThanOrEqual 13");
-    check(!p1.greaterThanOrEqual(n1), "greaterThanOrEqual 14");
-    check(!p2.greaterThanOrEqual(n1), "greaterThanOrEqual 15");
-    check(!p1.greaterThanOrEqual(p2), "greaterThanOrEqual 16");
-
-    System.err.println("Win32Address: all tests passed successfully.");
-  }
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32CDebugInfoBuilder.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,824 +0,0 @@
-/*
- * Copyright (c) 2001, 2003, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.debugger.win32;
-
-import java.util.*;
-
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.win32.coff.*;
-import sun.jvm.hotspot.debugger.cdbg.*;
-import sun.jvm.hotspot.debugger.cdbg.basic.*;
-import sun.jvm.hotspot.utilities.Assert;
-
-class Win32CDebugInfoBuilder
-  implements DebugVC50SubsectionTypes, DebugVC50TypeLeafIndices, DebugVC50TypeEnums, DebugVC50SymbolTypes, DebugVC50MemberAttributes, CVAttributes, AccessControl {
-  private Win32Debugger dbg;
-  private Address       base;
-
-  private DebugVC50 vc50;
-  private BasicCDebugInfoDataBase db;
-  private DebugVC50TypeIterator iter;
-
-  private DebugVC50SymbolIterator symIter;
-
-  // Logical->physical segment mapping
-  private COFFFile file;
-  private DebugVC50SSSegMap segMap;
-
-  // Canonicalization of primitive types
-  private Map primIndexToTypeMap;
-
-  // Global unnamed enumeration
-  // (FIXME: must figure out how to handle nested type descriptions)
-  private BasicEnumType unnamedEnum;
-
-  private Stack blockStack;
-  private int   endsToSkip;
-
-  private static final int POINTER_SIZE = 4;
-
-  Win32CDebugInfoBuilder(Win32Debugger dbg) {
-    this.dbg = dbg;
-  }
-
-  CDebugInfoDataBase buildDataBase(String dllName, Address base) {
-    this.base = base;
-    file = COFFFileParser.getParser().parse(dllName);
-    vc50 = getDebugVC50(file);
-
-    if (vc50 == null) return null;
-
-    segMap = getSegMap();
-
-    primIndexToTypeMap = new HashMap();
-    blockStack = new Stack();
-    endsToSkip = 0;
-
-    db = new BasicCDebugInfoDataBase();
-    db.beginConstruction();
-
-    // Get global types and add them to the database
-    DebugVC50SSGlobalTypes types = getGlobalTypes();
-    for (iter = types.getTypeIterator(); !iter.done(); iter.next()) {
-      while (!iter.typeStringDone()) {
-        switch (iter.typeStringLeaf()) {
-        case LF_MODIFIER: {
-          int idx = iter.getModifierIndex();
-          BasicType target = getTypeByIndex(idx);
-          short windowsMods = iter.getModifierAttribute();
-          short mods = 0;
-          if ((windowsMods & MODIFIER_CONST_MASK)    != 0) mods |= CONST;
-          if ((windowsMods & MODIFIER_VOLATILE_MASK) != 0) mods |= VOLATILE;
-          putType(target.getCVVariant(mods));
-          break;
-        }
-        case LF_POINTER: {
-          int idx = iter.getPointerType();
-          BasicType target = getTypeByIndex(idx);
-          short windowsMods = iter.getModifierAttribute();
-          short mods = 0;
-          if ((windowsMods & POINTER_CONST_MASK)    != 0) mods |= CONST;
-          if ((windowsMods & POINTER_VOLATILE_MASK) != 0) mods |= VOLATILE;
-          BasicPointerType ptrType = new BasicPointerType(POINTER_SIZE, target);
-          if (mods != 0) {
-            ptrType = (BasicPointerType) ptrType.getCVVariant(mods);
-          }
-
-          putType(ptrType);
-          break;
-        }
-        case LF_ARRAY: {
-          BasicType elemType = getTypeByIndex(iter.getArrayElementType());
-          putType(new BasicArrayType(iter.getArrayName(), elemType, iter.getArrayLength()));
-          break;
-        }
-        case LF_CLASS:
-        case LF_STRUCTURE: {
-          CompoundTypeKind kind = ((iter.typeStringLeaf() == LF_CLASS) ? CompoundTypeKind.CLASS
-                                                                       : CompoundTypeKind.STRUCT);
-          BasicCompoundType type = new BasicCompoundType(iter.getClassName(),
-                                                         iter.getClassSize(),
-                                                         kind);
-          // Skip parsing of forward references to types
-          // FIXME: do we have to resolve these later?
-          if ((iter.getClassProperty() & PROPERTY_FWDREF) == 0) {
-            DebugVC50TypeIterator fieldIter = iter.getClassFieldListIterator();
-            if (Assert.ASSERTS_ENABLED) {
-              Assert.that(fieldIter.typeStringLeaf() == LF_FIELDLIST, "Expected field list");
-            }
-            boolean advance = false;
-            while (!fieldIter.typeStringDone()) {
-              advance = true;
-              switch (fieldIter.typeStringLeaf()) {
-              case LF_FIELDLIST: break;
-              case LF_BCLASS: {
-                int accessControl = memberAttributeToAccessControl(fieldIter.getBClassAttribute());
-                Type baseType = getTypeByIndex(fieldIter.getBClassType());
-                // FIXME: take offset into account
-                type.addBaseClass(new BasicBaseClass(accessControl, false, baseType));
-                break;
-              }
-              case LF_VBCLASS: {
-                int accessControl = memberAttributeToAccessControl(fieldIter.getVBClassAttribute());
-                Type baseType = getTypeByIndex(fieldIter.getVBClassBaseClassType());
-                // FIXME: take offset and virtual base offset into account
-                type.addBaseClass(new BasicBaseClass(accessControl, true, baseType));
-                break;
-              }
-              // I don't think we need to handle indirect virtual base
-              // classes since they should be handled indirectly through
-              // the modeling of the type hierarchy
-              case LF_IVBCLASS: break;
-              case LF_INDEX: {
-                fieldIter = fieldIter.getIndexIterator();
-                advance = false;
-                break;
-              }
-              case LF_MEMBER: {
-                BasicField field = new BasicField(fieldIter.getMemberName(),
-                                                  getTypeByIndex(fieldIter.getMemberType()),
-                                                  memberAttributeToAccessControl(fieldIter.getMemberAttribute()),
-                                                  false);
-                field.setOffset(fieldIter.getMemberOffset());
-                type.addField(field);
-                break;
-              }
-              case LF_STMEMBER: {
-                BasicField field = new BasicField(fieldIter.getStaticName(),
-                                                  getTypeByIndex(fieldIter.getStaticType()),
-                                                  memberAttributeToAccessControl(fieldIter.getStaticAttribute()),
-                                                  true);
-                // The field's address will be found during resolution
-                // of the debug info database
-                type.addField(field);
-                break;
-              }
-              // FIXME: handle methods
-              case LF_METHOD: break;
-              case LF_ONEMETHOD: break;
-                // FIXME: handle nested types
-              case LF_NESTTYPE: break;
-              case LF_NESTTYPEEX: break;
-                // NOTE: virtual functions not needed/handled yet for
-                // this debugging system (because we are not planning to
-                // handle calling methods in the target process at
-                // runtime)
-              case LF_VFUNCTAB: break;
-              case LF_FRIENDCLS: break;
-              case LF_VFUNCOFF: break;
-              case LF_MEMBERMODIFY: break;
-              case LF_PAD0:  case LF_PAD1:  case LF_PAD2:  case LF_PAD3:
-              case LF_PAD4:  case LF_PAD5:  case LF_PAD6:  case LF_PAD7:
-              case LF_PAD8:  case LF_PAD9:  case LF_PAD10: case LF_PAD11:
-              case LF_PAD12: case LF_PAD13: case LF_PAD14: case LF_PAD15: break;
-              default: System.err.println("WARNING: unexpected leaf index " +
-                                          fieldIter.typeStringLeaf() +
-                                          " in field list for type " + iter.getTypeIndex());
-              }
-              if (advance) {
-                fieldIter.typeStringNext();
-              }
-            }
-          }
-          putType(type);
-          break;
-        }
-        case LF_UNION: {
-          BasicCompoundType type = new BasicCompoundType(iter.getUnionName(),
-                                                         iter.getUnionSize(),
-                                                         CompoundTypeKind.UNION);
-          // Skip parsing of forward references to types
-          // FIXME: do we have to resolve these later?
-          if ((iter.getClassProperty() & PROPERTY_FWDREF) == 0) {
-            DebugVC50TypeIterator fieldIter = iter.getUnionFieldListIterator();
-            if (Assert.ASSERTS_ENABLED) {
-              Assert.that(fieldIter.typeStringLeaf() == LF_FIELDLIST, "Expected field list");
-            }
-            boolean advance = false;
-            while (!fieldIter.typeStringDone()) {
-              advance = true;
-              switch (fieldIter.typeStringLeaf()) {
-              case LF_FIELDLIST: break;
-              case LF_BCLASS:    break;
-              case LF_VBCLASS:   break;
-              case LF_IVBCLASS:  break;
-              case LF_INDEX: {
-                fieldIter = fieldIter.getIndexIterator();
-                advance = false;
-                break;
-              }
-              case LF_MEMBER: {
-                BasicField field = new BasicField(fieldIter.getMemberName(),
-                                                  getTypeByIndex(fieldIter.getMemberType()),
-                                                  memberAttributeToAccessControl(fieldIter.getMemberAttribute()),
-                                                  false);
-                field.setOffset(fieldIter.getMemberOffset());
-                type.addField(field);
-                break;
-              }
-              case LF_STMEMBER: {
-                System.err.println("WARNING: I didn't think unions could contain static fields...");
-                BasicField field = new BasicField(fieldIter.getStaticName(),
-                                                  getTypeByIndex(fieldIter.getStaticType()),
-                                                  memberAttributeToAccessControl(fieldIter.getStaticAttribute()),
-                                                  true);
-                // The field's address will be found during resolution
-                // of the debug info database
-                type.addField(field);
-                break;
-              }
-              case LF_METHOD: break;
-              case LF_ONEMETHOD: break;
-                // FIXME: handle nested types
-              case LF_NESTTYPE: break;
-              case LF_NESTTYPEEX: break;
-              case LF_VFUNCTAB: break;
-              case LF_FRIENDCLS: break;
-              case LF_VFUNCOFF: break;
-              case LF_MEMBERMODIFY: break;
-              case LF_PAD0:  case LF_PAD1:  case LF_PAD2:  case LF_PAD3:
-              case LF_PAD4:  case LF_PAD5:  case LF_PAD6:  case LF_PAD7:
-              case LF_PAD8:  case LF_PAD9:  case LF_PAD10: case LF_PAD11:
-              case LF_PAD12: case LF_PAD13: case LF_PAD14: case LF_PAD15: break;
-
-              default: System.err.println("WARNING: unexpected leaf index " +
-                                          fieldIter.typeStringLeaf() +
-                                          " in field list for union of type " + iter.getTypeIndex());
-              }
-              if (advance) {
-                fieldIter.typeStringNext();
-              }
-            }
-          }
-          putType(type);
-          break;
-        }
-        case LF_ENUM: {
-          String name = iter.getEnumName();
-          BasicEnumType enumType = null;
-          if ((name == null) || (name.equals(""))) {
-            if (unnamedEnum == null) {
-              unnamedEnum = new BasicEnumType(null, getTypeByIndex(iter.getEnumType()));
-            }
-            enumType = unnamedEnum;
-          } else {
-            enumType = new BasicEnumType(name, getTypeByIndex(iter.getEnumType()));
-          }
-          DebugVC50TypeIterator fieldIter = iter.getEnumFieldListIterator();
-          if (Assert.ASSERTS_ENABLED) {
-            Assert.that(fieldIter.typeStringLeaf() == LF_FIELDLIST, "Expected field list");
-          }
-          boolean advance = false;
-          while (!fieldIter.typeStringDone()) {
-            advance = true;
-            switch (fieldIter.typeStringLeaf()) {
-            case LF_FIELDLIST: break;
-            case LF_ENUMERATE: {
-              String enumName = fieldIter.getEnumerateName();
-              long   enumVal  = fieldIter.getEnumerateValue();
-              enumType.addEnum(enumName, enumVal);
-              break;
-            }
-            case LF_INDEX: {
-              fieldIter = fieldIter.getIndexIterator();
-              advance = false;
-              break;
-            }
-
-            case LF_PAD0:  case LF_PAD1:  case LF_PAD2:  case LF_PAD3:
-            case LF_PAD4:  case LF_PAD5:  case LF_PAD6:  case LF_PAD7:
-            case LF_PAD8:  case LF_PAD9:  case LF_PAD10: case LF_PAD11:
-            case LF_PAD12: case LF_PAD13: case LF_PAD14: case LF_PAD15: break;
-
-            default: System.err.println("WARNING: unexpected leaf index " +
-                                        fieldIter.typeStringLeaf() +
-                                        " in field list for enum of type " + iter.getTypeIndex());
-            }
-
-            if (advance) {
-              fieldIter.typeStringNext();
-            }
-          }
-
-          putType(enumType);
-          break;
-        }
-        case LF_PROCEDURE: {
-          Type retType = getTypeByIndex(iter.getProcedureReturnType());
-          BasicFunctionType func = new BasicFunctionType(null, POINTER_SIZE, retType);
-          DebugVC50TypeIterator argIter = iter.getProcedureArgumentListIterator();
-          if (Assert.ASSERTS_ENABLED) {
-            Assert.that(argIter.typeStringLeaf() == LF_ARGLIST, "Expected argument list");
-          }
-          for (int i = 0; i < argIter.getArgListCount(); i++) {
-            func.addArgumentType(getTypeByIndex(argIter.getArgListType(i)));
-          }
-          putType(func);
-          break;
-        }
-        case LF_MFUNCTION: {
-          Type retType   = getTypeByIndex(iter.getMFunctionReturnType());
-          Type container = getTypeByIndex(iter.getMFunctionContainingClass());
-          Type thisType  = getTypeByIndex(iter.getMFunctionThis());
-          long thisAdjust = iter.getMFunctionThisAdjust();
-          BasicMemberFunctionType func = new BasicMemberFunctionType(null,
-                                                                     POINTER_SIZE,
-                                                                     retType,
-                                                                     container,
-                                                                     thisType,
-                                                                     thisAdjust);
-          DebugVC50TypeIterator argIter = iter.getMFunctionArgumentListIterator();
-          for (int i = 0; i < argIter.getArgListCount(); i++) {
-            func.addArgumentType(getTypeByIndex(argIter.getArgListType(i)));
-          }
-          putType(func);
-          break;
-        }
-        // FIXME: handle virtual function table shape description
-        case LF_VTSHAPE: break;
-        case LF_BARRAY: System.err.println("FIXME: don't know what to do with LF_BARRAY leaves (convert to pointers?"); break;
-        case LF_LABEL: break;
-        case LF_NULL: break; // FIXME: do we need to handle this? With what?
-        case LF_DIMARRAY: System.err.println("FIXME: don't know what to do with LF_DIMARRAY leaves yet"); break;
-        case LF_VFTPATH: break;
-        case LF_PRECOMP: break;
-        case LF_ENDPRECOMP: break;
-        case LF_OEM: break;
-        case LF_TYPESERVER: break;
-
-        // Type records referenced from other type records
-
-        case LF_SKIP: break;
-        case LF_ARGLIST: skipTypeRecord(); break;
-        case LF_DEFARG: System.err.println("FIXME: handle default arguments (dereference the type)"); break;
-        case LF_FIELDLIST: skipTypeRecord(); break;
-        case LF_DERIVED: break;
-        case LF_BITFIELD: {
-          Type underlyingType = getTypeByIndex(iter.getBitfieldFieldType());
-          BasicBitType bit = new BasicBitType(underlyingType,
-                                              (iter.getBitfieldLength() & 0xFF),
-                                              (iter.getBitfieldPosition() & 0xFF));
-          putType(bit);
-          break;
-        }
-        case LF_METHODLIST: break;
-        case LF_DIMCONU:
-        case LF_DIMCONLU:
-        case LF_DIMVARU:
-        case LF_DIMVARLU: break;
-        case LF_REFSYM: break;
-
-        case LF_PAD0:  case LF_PAD1:  case LF_PAD2:  case LF_PAD3:
-        case LF_PAD4:  case LF_PAD5:  case LF_PAD6:  case LF_PAD7:
-        case LF_PAD8:  case LF_PAD9:  case LF_PAD10: case LF_PAD11:
-        case LF_PAD12: case LF_PAD13: case LF_PAD14: case LF_PAD15: break;
-
-        default: {
-          System.err.println("Unexpected leaf index " +
-                             iter.typeStringLeaf() + " at offset 0x" +
-                             Integer.toHexString(iter.typeStringOffset()));
-          break;
-        }
-        }
-
-
-        if (!iter.typeStringDone()) {
-          iter.typeStringNext();
-        }
-      }
-    }
-
-    // Add all symbol directories to debug info
-    // (FIXME: must figure out how to handle module-by-module
-    // arrangement of at least the static symbols to have proper
-    // lookup -- should probably also take advantage of the PROCREF
-    // and UDT references to understand how to build the global
-    // database vs. the module-by-module one)
-    DebugVC50SubsectionDirectory dir = vc50.getSubsectionDirectory();
-    int moduleNumber = 0; // Debugging
-    for (int i = 0; i < dir.getNumEntries(); i++) {
-      DebugVC50Subsection ss = dir.getSubsection(i);
-      int ssType = ss.getSubsectionType();
-      boolean process = false;
-
-      if ((ssType == SST_GLOBAL_SYM) ||
-          (ssType == SST_GLOBAL_PUB) ||
-          (ssType == SST_STATIC_SYM)) {
-        DebugVC50SSSymbolBase syms = (DebugVC50SSSymbolBase) ss;
-        symIter = syms.getSymbolIterator();
-        process = true;
-      }
-
-      if (ssType == SST_ALIGN_SYM) {
-        DebugVC50SSAlignSym syms = (DebugVC50SSAlignSym) ss;
-        symIter = syms.getSymbolIterator();
-        process = true;
-      }
-
-      if (process) {
-        for (; !symIter.done(); symIter.next()) {
-          switch (symIter.getType()) {
-          case S_COMPILE: break;
-          case S_SSEARCH: break; // FIXME: may need this later
-          case S_END: {
-            try {
-              // FIXME: workaround for warnings until we figure out
-              // what to do with THUNK32 symbols
-              if (endsToSkip == 0) {
-                blockStack.pop();
-              } else {
-                --endsToSkip;
-              }
-            } catch (EmptyStackException e) {
-              System.err.println("WARNING: mismatched block begins/ends in debug information");
-            }
-            break;
-          }
-          case S_SKIP: break;
-          case S_CVRESERVE: break;
-          case S_OBJNAME: break; // FIXME: may need this later
-          case S_ENDARG: break;
-          case S_COBOLUDT: break;
-          case S_MANYREG: break; // FIXME: may need to add support for this
-          case S_RETURN: break;  // NOTE: would need this if adding support for calling functions
-          case S_ENTRYTHIS: break; // FIXME: may need to add support for this
-          case S_REGISTER: break; // FIXME: may need to add support for this
-          case S_CONSTANT: break; // FIXME: will need to add support for this
-          case S_UDT: break; // FIXME: need to see how these are used; are
-            // they redundant, or are they used to describe
-            // global variables as opposed to types?
-          case S_COBOLUDT2: break;
-          case S_MANYREG2: break;
-          case S_BPREL32: {
-            LocalSym sym = new BasicLocalSym(symIter.getBPRelName(),
-                                             getTypeByIndex(symIter.getBPRelType()),
-                                             symIter.getBPRelOffset());
-            addLocalToCurBlock(sym);
-            break;
-          }
-          case S_LDATA32:
-          case S_GDATA32: {
-            // FIXME: must handle these separately from global data (have
-            // module scoping and only add these at the module level)
-            boolean isModuleLocal = (symIter.getType() == S_LDATA32);
-
-            GlobalSym sym = new BasicGlobalSym(symIter.getLGDataName(),
-                                               getTypeByIndex(symIter.getLGDataType()),
-                                               newAddress(symIter.getLGDataOffset(), symIter.getLGDataSegment()),
-                                               isModuleLocal);
-            // FIXME: must handle module-local symbols differently
-            addGlobalSym(sym);
-            break;
-          }
-          case S_PUB32: break; // FIXME: figure out how these differ from
-            // above and how they are used
-          case S_LPROC32:
-          case S_GPROC32: {
-            BasicFunctionSym sym = new BasicFunctionSym(newLazyBlockSym(symIter.getLGProcParentOffset()),
-                                                        symIter.getLGProcLength(),
-                                                        newAddress(symIter.getLGProcOffset(), symIter.getLGProcSegment()),
-                                                        symIter.getLGProcName(),
-                                                        getTypeByIndex(symIter.getLGProcType()),
-                                                        (symIter.getType() == S_LPROC32));
-
-            // FIXME: have to handle local procedures differently (have
-            // notion of modules and only add those procedures to the
-            // module they are defined in)
-            addBlock(sym);
-            break;
-          }
-          case S_THUNK32: {
-            // FIXME: see whether we need to handle these
-            skipEnd();
-            break;
-          }
-          case S_BLOCK32: {
-            BasicBlockSym sym = new BasicBlockSym(newLazyBlockSym(symIter.getBlockParentOffset()),
-                                                  symIter.getBlockLength(),
-                                                  newAddress(symIter.getBlockOffset(), symIter.getBlockSegment()),
-                                                  symIter.getBlockName());
-            addBlock(sym);
-            break;
-          }
-          case S_WITH32: break;
-          case S_LABEL32: break;
-          case S_CEXMODEL32: break;
-          case S_VFTTABLE32: break; // FIXME: may need to handle this
-                                // (most likely for run-time type determination)
-          case S_REGREL32: break;   // FIXME: may need to add support for this
-          case S_LTHREAD32: break;
-          case S_GTHREAD32: break;  // FIXME: may need to add support for these
-          case S_PROCREF: break;
-          case S_DATAREF: break;
-          case S_ALIGN: break;
-          default:
-            // These two unknown symbol types show up very frequently.
-            // Symbol type 0 appears to always be a no-op symbol of
-            // length 2 (i.e., length just covers the symbol type.)
-            // Symbol type 4115 appears to be a copyright notice for
-            // the Microsoft linker.
-            if ((symIter.getType() != 0) && (symIter.getType() != 4115)) {
-              System.err.println("  NOTE: Unexpected symbol of type " +
-                                 symIter.getType() + " at offset 0x" +
-                                 Integer.toHexString(symIter.getOffset()));
-            }
-            break;
-          }
-        }
-      }
-    }
-
-    // Add line number information for all modules
-    for (int i = 0; i < dir.getNumEntries(); i++) {
-      DebugVC50Subsection ss = dir.getSubsection(i);
-      if (ss.getSubsectionType() == SST_SRC_MODULE) {
-        DebugVC50SSSrcModule srcMod = (DebugVC50SSSrcModule) ss;
-        for (int sf = 0; sf < srcMod.getNumSourceFiles(); sf++) {
-          DebugVC50SrcModFileDesc desc = srcMod.getSourceFileDesc(sf);
-          // Uniquify these to save space
-          String name = desc.getSourceFileName().intern();
-          for (int cs = 0; cs < desc.getNumCodeSegments(); cs++) {
-            DebugVC50SrcModLineNumberMap map = desc.getLineNumberMap(cs);
-            SectionHeader seg = file.getHeader().getSectionHeader(map.getSegment());
-            for (int lp = 0; lp < map.getNumSourceLinePairs(); lp++) {
-              Address startPC = base.addOffsetTo(seg.getVirtualAddress() + map.getCodeOffset(lp));
-              // Fake address for endPC -- will be filled in by BasicLineNumberMapping
-              Address endPC = base.addOffsetTo(seg.getSize());
-              db.addLineNumberInfo(new BasicLineNumberInfo(name, map.getLineNumber(lp), startPC, endPC));
-            }
-          }
-        }
-      }
-    }
-
-    // Finish assembly of database
-    db.resolve(new ResolveListener() {
-        public void resolveFailed(Type containingType, LazyType failedResolve, String detail) {
-          System.err.println("WARNING: failed to resolve type of index " +
-                             ((Integer) failedResolve.getKey()).intValue() +
-                             " in type " + containingType.getName() + " (class " +
-                             containingType.getClass().getName() + ") while " + detail);
-        }
-
-        public void resolveFailed(Type containingType, String staticFieldName) {
-          System.err.println("WARNING: failed to resolve address of static field \"" +
-                             staticFieldName + "\" in type " + containingType.getName());
-        }
-
-        public void resolveFailed(Sym containingSymbol, LazyType failedResolve, String detail) {
-          System.err.println("WARNING: failed to resolve type of index " +
-                             ((Integer) failedResolve.getKey()).intValue() +
-                             " in symbol of type " + containingSymbol.getClass().getName() +
-                             " while " + detail);
-        }
-
-        public void resolveFailed(Sym containingSymbol, LazyBlockSym failedResolve, String detail) {
-          System.err.println("WARNING: failed to resolve block at offset 0x" +
-                             Integer.toHexString(((Integer) failedResolve.getKey()).intValue()) +
-                             " in symbol of type " + containingSymbol.getClass().getName() +
-                             " while " + detail);
-        }
-      });
-
-    db.endConstruction();
-
-    return db;
-  }
-
-
-  //----------------------------------------------------------------------
-  // Internals only below this point
-  //
-
-  private static DebugVC50 getDebugVC50(COFFFile file) {
-    COFFHeader header = file.getHeader();
-    OptionalHeader opt = header.getOptionalHeader();
-    if (opt == null) {
-      // Optional header not found
-      return null;
-    }
-    OptionalHeaderDataDirectories dd = opt.getDataDirectories();
-    if (dd == null) {
-      // Optional header data directories not found
-      return null;
-    }
-    DebugDirectory debug = dd.getDebugDirectory();
-    if (debug == null) {
-      // Debug directory not found
-      return null;
-    }
-    for (int i = 0; i < debug.getNumEntries(); i++) {
-      DebugDirectoryEntry entry = debug.getEntry(i);
-      if (entry.getType() == DebugTypes.IMAGE_DEBUG_TYPE_CODEVIEW) {
-        return entry.getDebugVC50();
-      }
-    }
-
-    // CodeView information not found in debug directory
-    return null;
-  }
-
-  private DebugVC50SSSegMap getSegMap() {
-    return (DebugVC50SSSegMap) findSubsection(SST_SEG_MAP);
-  }
-
-  private DebugVC50SSGlobalTypes getGlobalTypes() {
-    return (DebugVC50SSGlobalTypes) findSubsection(SST_GLOBAL_TYPES);
-  }
-
-  private DebugVC50SSGlobalSym getGlobalSymbols() {
-    return (DebugVC50SSGlobalSym) findSubsection(SST_GLOBAL_SYM);
-  }
-
-  private DebugVC50Subsection findSubsection(short ssType) {
-    DebugVC50SubsectionDirectory dir = vc50.getSubsectionDirectory();
-    for (int i = 0; i < dir.getNumEntries(); i++) {
-      DebugVC50Subsection ss = dir.getSubsection(i);
-      if (ss.getSubsectionType() == ssType) {
-        return ss;
-      }
-    }
-    throw new DebuggerException("Unable to find subsection of type " + ssType);
-  }
-
-  private void putType(Type t) {
-    db.addType(new Integer(iter.getTypeIndex()), t);
-  }
-
-  private Address newAddress(int offset, short segment) {
-    int seg = segment & 0xFFFF;
-    // NOTE: it isn't clear how to use the segMap to map from logical
-    // to physical segments. It seems it would make more sense if the
-    // SegDescs contained a physical segment number in addition to the
-    // offset within the physical segment of the logical one.
-
-    // Get the section header corresponding to this segment
-    SectionHeader section = file.getHeader().getSectionHeader(seg);
-
-    // Result is relative to image base
-    return base.addOffsetTo(section.getVirtualAddress() + offset);
-  }
-
-  private BasicType getTypeByIndex(int intIndex) {
-    Integer index = new Integer(intIndex);
-
-    // Handle primitive types here.
-    if (intIndex <= 0x0FFF) {
-      BasicType type = (BasicType) primIndexToTypeMap.get(index);
-      if (type != null) {
-        return type;
-      }
-      // Construct appropriate new primitive type
-      int primMode = intIndex & RESERVED_MODE_MASK;
-      if (primMode == RESERVED_MODE_DIRECT) {
-        int primType = intIndex & RESERVED_TYPE_MASK;
-        switch (primType) {
-        case RESERVED_TYPE_SIGNED_INT:
-        case RESERVED_TYPE_UNSIGNED_INT: {
-          boolean unsigned = (primType == RESERVED_TYPE_UNSIGNED_INT);
-          int size = 0;
-          String name = null;
-          switch (intIndex & RESERVED_SIZE_MASK) {
-          case RESERVED_SIZE_INT_1_BYTE: size = 1; name = "char";    break;
-          case RESERVED_SIZE_INT_2_BYTE: size = 2; name = "short";   break;
-          case RESERVED_SIZE_INT_4_BYTE: size = 4; name = "int";     break;
-          case RESERVED_SIZE_INT_8_BYTE: size = 8; name = "__int64"; break;
-          default: throw new DebuggerException("Illegal size of integer type " + intIndex);
-          }
-          type = new BasicIntType(name, size, unsigned);
-          break;
-        }
-        case RESERVED_TYPE_BOOLEAN: {
-          int size = 0;
-          switch (intIndex & RESERVED_SIZE_MASK) {
-          case RESERVED_SIZE_INT_1_BYTE: size = 1; break;
-          case RESERVED_SIZE_INT_2_BYTE: size = 2; break;
-          case RESERVED_SIZE_INT_4_BYTE: size = 4; break;
-          case RESERVED_SIZE_INT_8_BYTE: size = 8; break;
-          default: throw new DebuggerException("Illegal size of boolean type " + intIndex);
-          }
-          type = new BasicIntType("bool", size, false);
-          break;
-        }
-        case RESERVED_TYPE_REAL: {
-          switch (intIndex & RESERVED_SIZE_MASK) {
-          case RESERVED_SIZE_REAL_32_BIT:
-            type = new BasicFloatType("float", 4);
-            break;
-          case RESERVED_SIZE_REAL_64_BIT:
-            type = new BasicDoubleType("double", 8);
-            break;
-          default:
-            throw new DebuggerException("Unsupported floating-point size in type " + intIndex);
-          }
-          break;
-        }
-        case RESERVED_TYPE_REALLY_INT: {
-          switch (intIndex & RESERVED_SIZE_MASK) {
-          case RESERVED_SIZE_REALLY_INT_CHAR:     type = new BasicIntType("char",    1, false); break;
-          case RESERVED_SIZE_REALLY_INT_WCHAR:    type = new BasicIntType("wchar",   2, false); break;
-          case RESERVED_SIZE_REALLY_INT_2_BYTE:   type = new BasicIntType("short",   2, false); break;
-          case RESERVED_SIZE_REALLY_INT_2_BYTE_U: type = new BasicIntType("short",   2, true);  break;
-          case RESERVED_SIZE_REALLY_INT_4_BYTE:   type = new BasicIntType("int",     4, false); break;
-          case RESERVED_SIZE_REALLY_INT_4_BYTE_U: type = new BasicIntType("int",     4, true);  break;
-          case RESERVED_SIZE_REALLY_INT_8_BYTE:   type = new BasicIntType("__int64", 8, false); break;
-          case RESERVED_SIZE_REALLY_INT_8_BYTE_U: type = new BasicIntType("__int64", 8, true);  break;
-          default: throw new DebuggerException("Illegal REALLY_INT size in type " + intIndex);
-          }
-          break;
-        }
-        case RESERVED_TYPE_SPECIAL: {
-          switch (intIndex & RESERVED_SIZE_MASK) {
-          case RESERVED_SIZE_SPECIAL_NO_TYPE:
-          case RESERVED_SIZE_SPECIAL_VOID: type = new BasicVoidType(); break;
-          default: throw new DebuggerException("Don't know how to handle reserved special type " + intIndex);
-          }
-          break;
-        }
-
-        default:
-          throw new DebuggerException("Don't know how to handle reserved type " + intIndex);
-        }
-      } else {
-        // Fold all pointer types together since we only support
-        // flat-mode addressing anyway
-        Type targetType = getTypeByIndex(intIndex & (~RESERVED_MODE_MASK));
-
-        type = new BasicPointerType(POINTER_SIZE, targetType);
-      }
-      if (Assert.ASSERTS_ENABLED) {
-        Assert.that(type != null, "Got null Type for primitive type " + intIndex);
-      }
-      primIndexToTypeMap.put(index, type);
-      return type;
-    }
-
-    // Not primitive type. Construct lazy reference to target type.
-    // (Is it worth canonicalizing these as well to save space?)
-    return new LazyType(index);
-  }
-
-  private void addBlock(BlockSym block) {
-    db.addBlock(new Integer(symIter.getOffset()), block);
-    blockStack.push(block);
-  }
-
-  private void skipEnd() {
-    ++endsToSkip;
-  }
-
-  private BlockSym newLazyBlockSym(int offset) {
-    if (offset == 0) {
-      return null;
-    }
-
-    return new LazyBlockSym(new Integer(offset));
-  }
-
-  private int memberAttributeToAccessControl(short memberAttribute) {
-    int acc = memberAttribute & MEMATTR_ACCESS_MASK;
-    switch (acc) {
-    case MEMATTR_ACCESS_NO_PROTECTION: return NO_PROTECTION;
-    case MEMATTR_ACCESS_PRIVATE:       return PRIVATE;
-    case MEMATTR_ACCESS_PROTECTED:     return PROTECTED;
-    case MEMATTR_ACCESS_PUBLIC:        return PUBLIC;
-    default: throw new RuntimeException("Should not reach here");
-    }
-  }
-
-  private void addLocalToCurBlock(LocalSym local) {
-    ((BasicBlockSym) blockStack.peek()).addLocal(local);
-  }
-
-  private void addGlobalSym(GlobalSym sym) {
-    db.addGlobalSym(sym);
-  }
-
-  private void skipTypeRecord() {
-    while (!iter.typeStringDone()) {
-      iter.typeStringNext();
-    }
-  }
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32CDebugger.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,123 +0,0 @@
-/*
- * Copyright (c) 2001, 2003, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.debugger.win32;
-
-import java.io.*;
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.cdbg.*;
-import sun.jvm.hotspot.debugger.cdbg.basic.x86.*;
-import sun.jvm.hotspot.debugger.x86.*;
-import sun.jvm.hotspot.utilities.AddressOps;
-
-class Win32CDebugger implements CDebugger, ProcessControl {
-  // FIXME: think about how to make this work in a remote debugging
-  // scenario; who should keep the DLLs open? Do we need local copies of
-  // these DLLs on the debugging machine?
-  private Win32Debugger dbg;
-
-  Win32CDebugger(Win32Debugger dbg) {
-    this.dbg = dbg;
-  }
-
-  public List getThreadList() throws DebuggerException {
-    return dbg.getThreadList();
-  }
-
-  public List/*<LoadObject>*/ getLoadObjectList() throws DebuggerException{
-    return dbg.getLoadObjectList();
-  }
-
-  public LoadObject loadObjectContainingPC(Address pc) throws DebuggerException {
-    // FIXME: could keep sorted list of these to be able to do binary
-    // searches, for better scalability
-    if (pc == null) {
-      return null;
-    }
-    List objs = getLoadObjectList();
-    for (Iterator iter = objs.iterator(); iter.hasNext(); ) {
-      LoadObject obj = (LoadObject) iter.next();
-      if (AddressOps.lte(obj.getBase(), pc) && (pc.minus(obj.getBase()) < obj.getSize())) {
-        return obj;
-      }
-    }
-    return null;
-  }
-
-  public CFrame topFrameForThread(ThreadProxy thread) throws DebuggerException {
-    X86ThreadContext context = (X86ThreadContext) thread.getContext();
-    Address ebp = context.getRegisterAsAddress(X86ThreadContext.EBP);
-    if (ebp == null) return null;
-    Address pc  = context.getRegisterAsAddress(X86ThreadContext.EIP);
-    if (pc == null) return null;
-    return new X86CFrame(this, ebp, pc);
-  }
-
-  public String getNameOfFile(String fileName) {
-    return new File(fileName).getName();
-  }
-
-  public ProcessControl getProcessControl() throws DebuggerException {
-    return this;
-  }
-
-  // C++ name demangling
-  public boolean canDemangle() {
-    return false;
-  }
-
-  public String demangle(String sym) {
-    throw new UnsupportedOperationException();
-  }
-
-  //
-  // Support for ProcessControl interface
-  //
-
-  public void suspend() throws DebuggerException {
-    dbg.suspend();
-  }
-  public void resume() throws DebuggerException {
-    dbg.resume();
-  }
-  public boolean isSuspended() throws DebuggerException {
-    return dbg.isSuspended();
-  }
-  public void setBreakpoint(Address addr) throws DebuggerException {
-    dbg.setBreakpoint(addr);
-  }
-  public void clearBreakpoint(Address addr) throws DebuggerException {
-    dbg.clearBreakpoint(addr);
-  }
-  public boolean isBreakpointSet(Address addr) throws DebuggerException {
-    return dbg.isBreakpointSet(addr);
-  }
-  public DebugEvent debugEventPoll() throws DebuggerException {
-    return dbg.debugEventPoll();
-  }
-  public void debugEventContinue() throws DebuggerException {
-    dbg.debugEventContinue();
-  }
-}
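// The FIXME in loadObjectContainingPC above suggests keeping the loadobjects
// sorted so the lookup can binary-search instead of scanning. A minimal sketch
// of that idea (not part of the original sources), assuming only the
// LoadObject/Address calls already used above and the same imports as
// Win32CDebugger.java:

class SortedLoadObjectLookup {
  private final List objs;   // List<LoadObject>, sorted once by base address

  SortedLoadObjectLookup(List loadObjects) {
    objs = new ArrayList(loadObjects);
    Collections.sort(objs, new Comparator() {
        public int compare(Object o1, Object o2) {
          long diff = ((LoadObject) o1).getBase().minus(((LoadObject) o2).getBase());
          return (diff < 0) ? -1 : ((diff > 0) ? 1 : 0);
        }
      });
  }

  LoadObject find(Address pc) {
    if (pc == null) return null;
    // Binary search for the last loadobject whose base is <= pc, then check
    // that pc actually falls within its size
    int lo = 0, hi = objs.size() - 1, best = -1;
    while (lo <= hi) {
      int mid = (lo + hi) >>> 1;
      LoadObject obj = (LoadObject) objs.get(mid);
      if (AddressOps.lte(obj.getBase(), pc)) { best = mid; lo = mid + 1; }
      else                                   { hi = mid - 1; }
    }
    if (best < 0) return null;
    LoadObject obj = (LoadObject) objs.get(best);
    return (pc.minus(obj.getBase()) < obj.getSize()) ? obj : null;
  }
}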
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32Debugger.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,135 +0,0 @@
-/*
- * Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.debugger.win32;
-
-import java.util.List;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.cdbg.*;
-
-/** An extension of the JVMDebugger interface with a few additions to
-    support 32-bit vs. 64-bit debugging as well as features required
-    by the architecture-specific subpackages. */
-
-public interface Win32Debugger extends JVMDebugger {
-  public String       addressValueToString(long address) throws DebuggerException;
-  public boolean      readJBoolean(long address) throws DebuggerException;
-  public byte         readJByte(long address) throws DebuggerException;
-  public char         readJChar(long address) throws DebuggerException;
-  public double       readJDouble(long address) throws DebuggerException;
-  public float        readJFloat(long address) throws DebuggerException;
-  public int          readJInt(long address) throws DebuggerException;
-  public long         readJLong(long address) throws DebuggerException;
-  public short        readJShort(long address) throws DebuggerException;
-  public long         readCInteger(long address, long numBytes, boolean isUnsigned)
-    throws DebuggerException;
-  public Win32Address readAddress(long address) throws DebuggerException;
-  public Win32Address readCompOopAddress(long address) throws DebuggerException;
-  public Win32OopHandle readOopHandle(long address) throws DebuggerException;
-  public Win32OopHandle readCompOopHandle(long address) throws DebuggerException;
-  public void         writeJBoolean(long address, boolean value) throws DebuggerException;
-  public void         writeJByte(long address, byte value) throws DebuggerException;
-  public void         writeJChar(long address, char value) throws DebuggerException;
-  public void         writeJDouble(long address, double value) throws DebuggerException;
-  public void         writeJFloat(long address, float value) throws DebuggerException;
-  public void         writeJInt(long address, int value) throws DebuggerException;
-  public void         writeJLong(long address, long value) throws DebuggerException;
-  public void         writeJShort(long address, short value) throws DebuggerException;
-  public void         writeCInteger(long address, long numBytes, long value) throws DebuggerException;
-  public void         writeAddress(long address, Win32Address value) throws DebuggerException;
-  public void         writeOopHandle(long address, Win32OopHandle value) throws DebuggerException;
-
-  // On Windows the int is actually the value of a HANDLE which
-  // currently must be read from the target process; that is, the
-  // target process must maintain its own thread list, each element of
-  // which holds a HANDLE to its underlying OS thread. FIXME: should
-  // add access to the OS-level thread list, but there are too many
-  // limitations imposed by Windows to usefully do so; see
-  // src/os/win32/agent/README-commands.txt, command "duphandle".
-  //
-  // The returned array of register contents is guaranteed to be in
-  // the same order as in the DbxDebugger for Solaris/x86; that is,
-  // the indices match those in debugger/x86/X86ThreadContext.java.
-  public long[]       getThreadIntegerRegisterSet(int threadHandleValue,
-                                                  boolean mustDuplicateHandle) throws DebuggerException;
-  // Implementation of setContext
-  public void         setThreadIntegerRegisterSet(int threadHandleValue,
-                                                  boolean mustDuplicateHandle,
-                                                  long[] contents) throws DebuggerException;
-
-  public Address      newAddress(long value) throws DebuggerException;
-
-  // Routine supporting the ThreadProxy implementation, in particular
-  // the ability to get a thread ID from a thread handle via
-  // examination of the Thread Information Block. Fetch the LDT entry
-  // for a given selector.
-  public Win32LDTEntry getThreadSelectorEntry(int threadHandleValue,
-                                              boolean mustDuplicateHandle,
-                                              int selector) throws DebuggerException;
-
-  // Support for the CDebugger interface. Retrieves the thread list of
-  // the target process as a List of ThreadProxy objects.
-  public List/*<ThreadProxy>*/ getThreadList() throws DebuggerException;
-
-  // Support for the CDebugger interface. Retrieves a List of the
-  // loadobjects in the target process.
-  public List/*<LoadObject>*/ getLoadObjectList() throws DebuggerException;
-
-  // Support for the ProcessControl interface
-  public void writeBytesToProcess(long startAddress, long numBytes, byte[] data) throws UnmappedAddressException, DebuggerException;
-  public void suspend() throws DebuggerException;
-  public void resume() throws DebuggerException;
-  public boolean isSuspended() throws DebuggerException;
-  public void setBreakpoint(Address addr) throws DebuggerException;
-  public void clearBreakpoint(Address addr) throws DebuggerException;
-  public boolean isBreakpointSet(Address addr) throws DebuggerException;
-  // FIXME: do not want to expose complicated data structures (like
-  // the DebugEvent) in this interface due to serialization issues
-  public DebugEvent debugEventPoll() throws DebuggerException;
-  public void debugEventContinue() throws DebuggerException;
-
-  // NOTE: this interface implicitly contains the following methods:
-  // From the Debugger interface via JVMDebugger
-  //   public void attach(int processID) throws DebuggerException;
-  //   public void attach(String executableName, String coreFileName) throws DebuggerException;
-  //   public boolean detach();
-  //   public Address parseAddress(String addressString) throws NumberFormatException;
-  //   public long getAddressValue(Address addr) throws DebuggerException;
-  //   public String getOS();
-  //   public String getCPU();
-  // From the SymbolLookup interface via Debugger and JVMDebugger
-  //   public Address lookup(String objectName, String symbol);
-  //   public OopHandle lookupOop(String objectName, String symbol);
-  // From the JVMDebugger interface
-  //   public void configureJavaPrimitiveTypeSizes(long jbooleanSize,
-  //                                               long jbyteSize,
-  //                                               long jcharSize,
-  //                                               long jdoubleSize,
-  //                                               long jfloatSize,
-  //                                               long jintSize,
-  //                                               long jlongSize,
-  //                                               long jshortSize);
-  // From the ThreadAccess interface via Debugger and JVMDebugger
-  //   public ThreadProxy getThreadForIdentifierAddress(Address addr);
-}
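// The comment on getThreadIntegerRegisterSet above promises that the returned
// array is indexed by the constants in debugger/x86/X86ThreadContext.java. A
// minimal caller-side sketch of that contract (illustration only, not part of
// the original sources; the thread handle value would be obtained elsewhere):

static Address programCounterOf(Win32Debugger dbg, int threadHandle)
    throws DebuggerException {
  long[] regs = dbg.getThreadIntegerRegisterSet(threadHandle, true /* duplicate handle */);
  return dbg.newAddress(regs[X86ThreadContext.EIP]);
}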
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32DebuggerLocal.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1083 +0,0 @@
-/*
- * Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.debugger.win32;
-
-import java.io.*;
-import java.net.*;
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.x86.*;
-import sun.jvm.hotspot.debugger.win32.coff.*;
-import sun.jvm.hotspot.debugger.cdbg.*;
-import sun.jvm.hotspot.debugger.cdbg.basic.BasicDebugEvent;
-import sun.jvm.hotspot.utilities.*;
-import sun.jvm.hotspot.utilities.memo.*;
-
-/** <P> An implementation of the JVMDebugger interface which talks to
-    the Free Windows Debug Server (FwDbgSrv) over a socket to
-    implement attach/detach and read from process memory. All DLL and
-    symbol table management is done in Java. </P>
-
-    <P> <B>NOTE</B> that since we have the notion of fetching "Java
-    primitive types" from the remote process (which might have
-    different sizes than we expect) we have a bootstrapping
-    problem. We need to know the sizes of these types before we can
-    fetch them. The current implementation solves this problem by
-    requiring that it be configured with these type sizes before they
-    can be fetched. The readJ(Type) routines here will throw a
-    RuntimeException if they are called before the debugger is
-    configured with the Java primitive type sizes. </P> */
-
-public class Win32DebuggerLocal extends DebuggerBase implements Win32Debugger {
-  private Socket debuggerSocket;
-  private boolean attached;
-  // FIXME: update when core files supported
-  private long pid;
-  // Communication with debug server
-  private PrintWriter out;
-  private DataOutputStream rawOut;
-  private InputLexer in;
-  private static final int PORT = 27000;
-  private PageCache cache;
-  private static final long SHORT_TIMEOUT = 2000;
-  private static final long LONG_TIMEOUT = 20000;
-
-  // Symbol lookup support
-  // This is a map of library names to DLLs
-  private Map nameToDllMap;
-
-  // C/C++ debugging support
-  private List/*<LoadObject>*/ loadObjects;
-  private CDebugger cdbg;
-
-  // ProcessControl support
-  private boolean suspended;
-  // Maps Long objects (addresses) to Byte objects (original instructions)
-  // (Longs used instead of Addresses to properly represent breakpoints at 0x0 if needed)
-  private Map     breakpoints;
-  // Current debug event, if any
-  private DebugEvent curDebugEvent;
-
-  //--------------------------------------------------------------------------------
-  // Implementation of Debugger interface
-  //
-
-  /** <P> machDesc may not be null. </P>
-
-      <P> useCache should be set to true if debugging is being done
-      locally, and to false if the debugger is being created for the
-      purpose of supporting remote debugging. </P> */
-  public Win32DebuggerLocal(MachineDescription machDesc,
-                            boolean useCache) throws DebuggerException {
-    this.machDesc = machDesc;
-    utils = new DebuggerUtilities(machDesc.getAddressSize(), machDesc.isBigEndian());
-    if (useCache) {
-      // Cache portion of the remote process's address space.
-      // Fetching data over the socket connection to the debug server is
-      // slow. Might be faster if we were using a binary protocol to talk
-      // to it, but we would have to test. For now, this cache works best
-      // if it covers the entire heap of the remote process. FIXME: at
-      // least should make this tunable from the outside, i.e., via
-      // the UI. This is a cache of 4096 4K pages, or 16 MB. The page
-      // size must be adjusted to be the hardware's page size.
-      // (FIXME: should pick this up from the debugger.)
-      initCache(4096, parseCacheNumPagesProperty(4096));
-    }
-    // FIXME: add instantiation of thread factory
-
-    try {
-      connectToDebugServer();
-    } catch (IOException e) {
-      throw new DebuggerException(e);
-    }
-  }
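// Per the class comment above, the readJ*() routines throw until the debugger
// has been configured with the target VM's Java primitive type sizes. A hedged
// usage sketch (the sizes shown and the machDesc/pid/addr values are
// illustrative placeholders, not taken from the original sources):
//
//   Win32DebuggerLocal dbg = new Win32DebuggerLocal(machDesc, true /* useCache */);
//   dbg.configureJavaPrimitiveTypeSizes(1, 1, 2, 8, 4, 4, 8, 2);
//       // order: jboolean, jbyte, jchar, jdouble, jfloat, jint, jlong, jshort
//   dbg.attach(pid);
//   int value = dbg.readJInt(addr);   // would throw if the sizes had not been set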
-
-  /** From the Debugger interface via JVMDebugger */
-  public boolean hasProcessList() throws DebuggerException {
-    return true;
-  }
-
-  /** From the Debugger interface via JVMDebugger */
-  public List getProcessList() throws DebuggerException {
-    List processes = new ArrayList();
-
-    try {
-      printlnToOutput("proclist");
-      int num = in.parseInt();
-      for (int i = 0; i < num; i++) {
-        int pid = in.parseInt();
-        String name = parseString();
-        // NOTE: Win32 hack
-        if (name.equals("")) {
-          name = "System Idle Process";
-        }
-        processes.add(new ProcessInfo(name, pid));
-      }
-      return processes;
-    }
-    catch (IOException e) {
-      throw new DebuggerException(e);
-    }
-  }
-
-  /** From the Debugger interface via JVMDebugger */
-  public synchronized void attach(int processID) throws DebuggerException {
-    if (attached) {
-      // FIXME: update when core files supported
-      throw new DebuggerException("Already attached to process " + pid);
-    }
-
-    try {
-      printlnToOutput("attach " + processID);
-      if (!in.parseBoolean()) {
-        throw new DebuggerException("Error attaching to process, or no such process");
-      }
-
-      attached = true;
-      pid = processID;
-      suspended = true;
-      breakpoints = new HashMap();
-      curDebugEvent = null;
-      nameToDllMap = null;
-      loadObjects = null;
-    }
-    catch (IOException e) {
-        throw new DebuggerException(e);
-    }
-  }
-
-  /** From the Debugger interface via JVMDebugger */
-  public synchronized void attach(String executableName, String coreFileName) throws DebuggerException {
-    throw new DebuggerException("Core files not yet supported on Win32");
-  }
-
-  /** From the Debugger interface via JVMDebugger */
-  public synchronized boolean detach() {
-    if (!attached) {
-      return false;
-    }
-
-    attached = false;
-    suspended = false;
-    breakpoints = null;
-
-    // Close all open DLLs
-    if (nameToDllMap != null) {
-      for (Iterator iter = nameToDllMap.values().iterator(); iter.hasNext(); ) {
-        DLL dll = (DLL) iter.next();
-        dll.close();
-      }
-      nameToDllMap = null;
-      loadObjects = null;
-    }
-
-    cdbg = null;
-    clearCache();
-
-    try {
-      printlnToOutput("detach");
-      return in.parseBoolean();
-    }
-    catch (IOException e) {
-      throw new DebuggerException(e);
-    }
-  }
-
-  /** From the Debugger interface via JVMDebugger */
-  public Address parseAddress(String addressString) throws NumberFormatException {
-    return newAddress(utils.scanAddress(addressString));
-  }
-
-  /** From the Debugger interface via JVMDebugger */
-  public String getOS() {
-    return PlatformInfo.getOS();
-  }
-
-  /** From the Debugger interface via JVMDebugger */
-  public String getCPU() {
-    return PlatformInfo.getCPU();
-  }
-
-  public boolean hasConsole() throws DebuggerException {
-    return false;
-  }
-
-  public String consoleExecuteCommand(String cmd) throws DebuggerException {
-    throw new DebuggerException("No debugger console available on Win32");
-  }
-
-  public String getConsolePrompt() throws DebuggerException {
-    return null;
-  }
-
-  public CDebugger getCDebugger() throws DebuggerException {
-    if (cdbg == null) {
-      cdbg = new Win32CDebugger(this);
-    }
-    return cdbg;
-  }
-
-  /** From the SymbolLookup interface via Debugger and JVMDebugger */
-  public synchronized Address lookup(String objectName, String symbol) {
-    if (!attached) {
-      return null;
-    }
-    return newAddress(lookupInProcess(objectName, symbol));
-  }
-
-  /** From the SymbolLookup interface via Debugger and JVMDebugger */
-  public synchronized OopHandle lookupOop(String objectName, String symbol) {
-    Address addr = lookup(objectName, symbol);
-    if (addr == null) {
-      return null;
-    }
-    return addr.addOffsetToAsOopHandle(0);
-  }
-
-  /** From the Debugger interface */
-  public MachineDescription getMachineDescription() {
-    return machDesc;
-  }
-
-  //--------------------------------------------------------------------------------
-  // Implementation of ThreadAccess interface
-  //
-
-  /** From the ThreadAccess interface via Debugger and JVMDebugger */
-  public ThreadProxy getThreadForIdentifierAddress(Address addr) {
-    return new Win32Thread(this, addr);
-  }
-
-  public ThreadProxy getThreadForThreadId(long handle) {
-    return new Win32Thread(this, handle);
-  }
-
-  //----------------------------------------------------------------------
-  // Overridden from DebuggerBase because we need to relax alignment
-  // constraints on x86
-
-  public long readJLong(long address)
-    throws UnmappedAddressException, UnalignedAddressException {
-    checkJavaConfigured();
-    // FIXME: allow this to be configurable. Undesirable to add a
-    // dependency on the runtime package here, though, since this
-    // package should be strictly underneath it.
-    //    utils.checkAlignment(address, jlongSize);
-    utils.checkAlignment(address, jintSize);
-    byte[] data = readBytes(address, jlongSize);
-    return utils.dataToJLong(data, jlongSize);
-  }
-
-  //--------------------------------------------------------------------------------
-  // Internal routines (for implementation of Win32Address).
-  // These must not be called until the MachineDescription has been set up.
-  //
-
-  /** From the Win32Debugger interface */
-  public String addressValueToString(long address) {
-    return utils.addressValueToString(address);
-  }
-
-  /** From the Win32Debugger interface */
-  public Win32Address readAddress(long address)
-    throws UnmappedAddressException, UnalignedAddressException {
-    return (Win32Address) newAddress(readAddressValue(address));
-  }
-
-  public Win32Address readCompOopAddress(long address)
-    throws UnmappedAddressException, UnalignedAddressException {
-    return (Win32Address) newAddress(readCompOopAddressValue(address));
-  }
-
-  /** From the Win32Debugger interface */
-  public Win32OopHandle readOopHandle(long address)
-    throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
-    long value = readAddressValue(address);
-    return (value == 0 ? null : new Win32OopHandle(this, value));
-  }
-  public Win32OopHandle readCompOopHandle(long address)
-    throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
-    long value = readCompOopAddressValue(address);
-    return (value == 0 ? null : new Win32OopHandle(this, value));
-  }
-
-  /** From the Win32Debugger interface */
-  public void writeAddress(long address, Win32Address value) {
-    writeAddressValue(address, getAddressValue(value));
-  }
-
-  /** From the Win32Debugger interface */
-  public void writeOopHandle(long address, Win32OopHandle value) {
-    writeAddressValue(address, getAddressValue(value));
-  }
-
-  //--------------------------------------------------------------------------------
-  // Thread context access
-  //
-
-  public synchronized long[] getThreadIntegerRegisterSet(int threadHandleValue,
-                                                         boolean mustDuplicateHandle)
-    throws DebuggerException {
-    if (!suspended) {
-      throw new DebuggerException("Process not suspended");
-    }
-
-    try {
-      int handle = threadHandleValue;
-      if (mustDuplicateHandle) {
-        printlnToOutput("duphandle 0x" + Integer.toHexString(threadHandleValue));
-        if (!in.parseBoolean()) {
-          throw new DebuggerException("Error duplicating thread handle 0x" + threadHandleValue);
-        }
-        handle = (int) in.parseAddress(); // Must close to avoid leaks
-      }
-      printlnToOutput("getcontext 0x" + Integer.toHexString(handle));
-      if (!in.parseBoolean()) {
-        if (mustDuplicateHandle) {
-          printlnToOutput("closehandle 0x" + Integer.toHexString(handle));
-        }
-        String failMessage = "GetThreadContext failed for thread handle 0x" +
-                             Integer.toHexString(handle);
-        if (mustDuplicateHandle) {
-          failMessage = failMessage + ", duplicated from thread handle " +
-                        Integer.toHexString(threadHandleValue);
-        }
-        throw new DebuggerException(failMessage);
-      }
-      // Otherwise, parse all registers. See
-      // src/os/win32/agent/README-commands.txt for the format.
-      // Note the array we have to return has to match that specified by
-      // X86ThreadContext.java.
-      int numRegs = 22;
-      long[] winRegs = new long[numRegs];
-      for (int i = 0; i < numRegs; i++) {
-        winRegs[i] = in.parseAddress();
-      }
-      if (mustDuplicateHandle) {
-        // Clean up after ourselves
-        printlnToOutput("closehandle 0x" + Integer.toHexString(handle));
-      }
-      // Now create the real return value
-      long[] retval = new long[X86ThreadContext.NPRGREG];
-      retval[X86ThreadContext.EAX] = winRegs[0];
-      retval[X86ThreadContext.EBX] = winRegs[1];
-      retval[X86ThreadContext.ECX] = winRegs[2];
-      retval[X86ThreadContext.EDX] = winRegs[3];
-      retval[X86ThreadContext.ESI] = winRegs[4];
-      retval[X86ThreadContext.EDI] = winRegs[5];
-      retval[X86ThreadContext.EBP] = winRegs[6];
-      retval[X86ThreadContext.ESP] = winRegs[7];
-      retval[X86ThreadContext.EIP] = winRegs[8];
-      retval[X86ThreadContext.DS]  = winRegs[9];
-      retval[X86ThreadContext.ES]  = winRegs[10];
-      retval[X86ThreadContext.FS]  = winRegs[11];
-      retval[X86ThreadContext.GS]  = winRegs[12];
-      retval[X86ThreadContext.CS]  = winRegs[13];
-      retval[X86ThreadContext.SS]  = winRegs[14];
-      retval[X86ThreadContext.EFL] = winRegs[15];
-      retval[X86ThreadContext.DR0] = winRegs[16];
-      retval[X86ThreadContext.DR1] = winRegs[17];
-      retval[X86ThreadContext.DR2] = winRegs[18];
-      retval[X86ThreadContext.DR3] = winRegs[19];
-      retval[X86ThreadContext.DR6] = winRegs[20];
-      retval[X86ThreadContext.DR7] = winRegs[21];
-      return retval;
-    } catch (IOException e) {
-      throw new DebuggerException(e);
-    }
-  }
-
-  public synchronized void setThreadIntegerRegisterSet(int threadHandleValue,
-                                                       boolean mustDuplicateHandle,
-                                                       long[] context)
-    throws DebuggerException {
-    if (!suspended) {
-      throw new DebuggerException("Process not suspended");
-    }
-
-    try {
-      int handle = threadHandleValue;
-      if (mustDuplicateHandle) {
-        printlnToOutput("duphandle 0x" + Integer.toHexString(threadHandleValue));
-        if (!in.parseBoolean()) {
-          throw new DebuggerException("Error duplicating thread handle 0x" + threadHandleValue);
-        }
-        handle = (int) in.parseAddress(); // Must close to avoid leaks
-      }
-      // Change order of registers to match that of debug server
-      long[] winRegs = new long[context.length];
-      winRegs[0] = context[X86ThreadContext.EAX];
-      winRegs[1] = context[X86ThreadContext.EBX];
-      winRegs[2] = context[X86ThreadContext.ECX];
-      winRegs[3] = context[X86ThreadContext.EDX];
-      winRegs[4] = context[X86ThreadContext.ESI];
-      winRegs[5] = context[X86ThreadContext.EDI];
-      winRegs[6] = context[X86ThreadContext.EBP];
-      winRegs[7] = context[X86ThreadContext.ESP];
-      winRegs[8] = context[X86ThreadContext.EIP];
-      winRegs[9] = context[X86ThreadContext.DS];
-      winRegs[10] = context[X86ThreadContext.ES];
-      winRegs[11] = context[X86ThreadContext.FS];
-      winRegs[12] = context[X86ThreadContext.GS];
-      winRegs[13] = context[X86ThreadContext.CS];
-      winRegs[14] = context[X86ThreadContext.SS];
-      winRegs[15] = context[X86ThreadContext.EFL];
-      winRegs[16] = context[X86ThreadContext.DR0];
-      winRegs[17] = context[X86ThreadContext.DR1];
-      winRegs[18] = context[X86ThreadContext.DR2];
-      winRegs[19] = context[X86ThreadContext.DR3];
-      winRegs[20] = context[X86ThreadContext.DR6];
-      winRegs[21] = context[X86ThreadContext.DR7];
-      StringBuffer cmd = new StringBuffer();
-      cmd.append("setcontext 0x");
-      cmd.append(Integer.toHexString(handle)); // use the (possibly duplicated) handle, matching getcontext
-      for (int i = 0; i < context.length; i++) {
-        cmd.append(" 0x");
-        cmd.append(Long.toHexString(winRegs[i]));
-      }
-      printlnToOutput(cmd.toString());
-      boolean res = in.parseBoolean();
-      if (mustDuplicateHandle) {
-        printlnToOutput("closehandle 0x" + Integer.toHexString(handle));
-      }
-      if (!res) {
-        String failMessage = "SetThreadContext failed for thread handle 0x" +
-          Integer.toHexString(handle);
-        if (mustDuplicateHandle) {
-          failMessage = failMessage + ", duplicated from thread handle " +
-            Integer.toHexString(threadHandleValue);
-        }
-        throw new DebuggerException(failMessage);
-      }
-    } catch (IOException e) {
-      throw new DebuggerException(e);
-    }
-  }
-
-  /** Fetches the Win32 LDT_ENTRY for the given thread and selector.
-      This data structure allows the conversion of a segment-relative
-      address to a linear virtual address. For example, it allows the
-      expression of operations like "mov eax, fs:[18h]", which fetches
-      the thread information block, allowing access to the thread
-      ID. */
-  public synchronized Win32LDTEntry getThreadSelectorEntry(int threadHandleValue,
-                                                           boolean mustDuplicateHandle,
-                                                           int selector)
-    throws DebuggerException {
-    try {
-      int handle = threadHandleValue;
-      if (mustDuplicateHandle) {
-        printlnToOutput("duphandle 0x" + Integer.toHexString(threadHandleValue));
-        if (!in.parseBoolean()) {
-          throw new DebuggerException("Error duplicating thread handle 0x" + threadHandleValue);
-        }
-        handle = (int) in.parseAddress(); // Must close to avoid leaks
-      }
-      printlnToOutput("selectorentry 0x" + Integer.toHexString(handle) + " " + selector);
-      if (!in.parseBoolean()) {
-        if (mustDuplicateHandle) {
-          printlnToOutput("closehandle 0x" + Integer.toHexString(handle));
-        }
-        throw new DebuggerException("GetThreadContext failed for thread handle 0x" + handle +
-                                    ", duplicated from thread handle " + threadHandleValue);
-      }
-      // Parse result. See
-      // src/os/win32/agent/README-commands.txt for the format.
-      short limitLow = (short) in.parseAddress();
-      short baseLow  = (short) in.parseAddress();
-      byte  baseMid  = (byte)  in.parseAddress();
-      byte  flags1   = (byte)  in.parseAddress();
-      byte  flags2   = (byte)  in.parseAddress();
-      byte  baseHi   = (byte)  in.parseAddress();
-      return new Win32LDTEntry(limitLow, baseLow, baseMid, flags1, flags2, baseHi);
-    } catch (IOException e) {
-      throw new DebuggerException(e);
-    }
-  }
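// The javadoc on getThreadSelectorEntry explains that the LDT entry allows a
// segment-relative address to be converted to a linear one. A minimal sketch of
// assembling the 32-bit descriptor base from the raw pieces parsed above
// (standard x86 descriptor layout; the shipped Win32LDTEntry presumably exposes
// an equivalent accessor):

private static long descriptorBase(short baseLow, byte baseMid, byte baseHi) {
  return ((baseLow & 0xFFFFL)      ) |
         ((baseMid & 0xFFL)   << 16) |
         ((baseHi  & 0xFFL)   << 24);
}
// e.g. the "fs:[18h]" TIB self-pointer from the javadoc would live at
//   descriptorBase(baseLow, baseMid, baseHi) + 0x18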
-
-  public synchronized List getThreadList() throws DebuggerException {
-    if (!suspended) {
-      throw new DebuggerException("Process not suspended");
-    }
-
-    try {
-      printlnToOutput("threadlist");
-      List ret = new ArrayList();
-      int numThreads = in.parseInt();
-      for (int i = 0; i < numThreads; i++) {
-        int handle = (int) in.parseAddress();
-        ret.add(new Win32Thread(this, handle));
-      }
-      return ret;
-    } catch (IOException e) {
-      throw new DebuggerException(e);
-    }
-  }
-
-  public synchronized List getLoadObjectList() throws DebuggerException {
-    if (!suspended) {
-      throw new DebuggerException("Process not suspended");
-    }
-
-    try {
-      if (loadObjects == null) {
-        loadObjects  = new ArrayList();
-        nameToDllMap = new HashMap();
-        // Get list of library names and base addresses
-        printlnToOutput("libinfo");
-        int numInfo = in.parseInt();
-
-        for (int i = 0; i < numInfo; i++) {
-          // NOTE: because Win32 is case insensitive, we standardize on
-          // lowercase file names.
-          String  fullPathName = parseString().toLowerCase();
-          Address base         = newAddress(in.parseAddress());
-
-          File   file = new File(fullPathName);
-          long   size = file.length();
-          DLL    dll  = new DLL(this, fullPathName, size, base);
-          String name = file.getName();
-          nameToDllMap.put(name, dll);
-          loadObjects.add(dll);
-        }
-      }
-    } catch (IOException e) {
-      throw new DebuggerException(e);
-    }
-
-    return loadObjects;
-  }
-
-  //----------------------------------------------------------------------
-  // Process control access
-  //
-
-  public synchronized void writeBytesToProcess(long startAddress, long numBytes, byte[] data)
-    throws UnmappedAddressException, DebuggerException {
-    try {
-      printToOutput("poke 0x" + Long.toHexString(startAddress) +
-                    " |");
-      writeIntToOutput((int) numBytes);
-      writeToOutput(data, 0, (int) numBytes);
-      printlnToOutput("");
-      if (!in.parseBoolean()) {
-        throw new UnmappedAddressException(startAddress);
-      }
-    } catch (IOException e) {
-      throw new DebuggerException(e);
-    }
-  }
-
-  public synchronized void suspend() throws DebuggerException {
-    try {
-      if (suspended) {
-        throw new DebuggerException("Process already suspended");
-      }
-      printlnToOutput("suspend");
-      suspended = true;
-      enableCache();
-      reresolveLoadObjects();
-    } catch (IOException e) {
-      throw new DebuggerException(e);
-    }
-  }
-
-  public synchronized void resume() throws DebuggerException {
-    try {
-      if (!suspended) {
-        throw new DebuggerException("Process not suspended");
-      }
-      disableCache();
-      printlnToOutput("resume");
-      suspended = false;
-    } catch (IOException e) {
-      throw new DebuggerException(e);
-    }
-  }
-
-  public synchronized boolean isSuspended() throws DebuggerException {
-    return suspended;
-  }
-
-  public synchronized void setBreakpoint(Address addr) throws DebuggerException {
-    if (!suspended) {
-      throw new DebuggerException("Process not suspended");
-    }
-
-    long addrVal = getAddressValue(addr);
-    Long where = new Long(addrVal);
-    if (breakpoints.get(where) != null) {
-      throw new DebuggerException("Breakpoint already set at " + addr);
-    }
-    Byte what = new Byte(readBytes(addrVal, 1)[0]);
-    // Now put 0xCC (int 3) at the target address; fail if we cannot
-    writeBytesToProcess(addrVal, 1, new byte[] { (byte) 0xCC });
-    // OK, the breakpoint is set.
-    breakpoints.put(where, what);
-  }
-
-  public synchronized void clearBreakpoint(Address addr) throws DebuggerException {
-    if (!suspended) {
-      throw new DebuggerException("Process not suspended");
-    }
-
-    long addrVal = getAddressValue(addr);
-    Long where = new Long(addrVal);
-    Byte what = (Byte) breakpoints.get(where);
-    if (what == null) {
-      throw new DebuggerException("Breakpoint not set at " + addr);
-    }
-    // Put original data back at address
-    writeBytesToProcess(addrVal, 1, new byte[] { what.byteValue() });
-    // OK, breakpoint is cleared
-    breakpoints.remove(where);
-  }
-
-  public synchronized boolean isBreakpointSet(Address addr) throws DebuggerException {
-    return (breakpoints.get(new Long(getAddressValue(addr))) != null);
-  }
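// Caller-side sketch of the int-3 breakpoint cycle implemented above, using
// only ProcessControl methods of this class (illustration only, not part of the
// original sources; real code would bound the polling loop, handle the other
// event types, and deal with stepping past the breakpoint, which is the
// unresolved FIXME in debugEventContinue below):

static void runToBreakpoint(Win32DebuggerLocal dbg, Address target) throws DebuggerException {
  if (!dbg.isSuspended()) dbg.suspend();
  dbg.setBreakpoint(target);          // patches 0xCC over the original opcode
  dbg.resume();
  DebugEvent ev = null;
  while (ev == null) {
    ev = dbg.debugEventPoll();        // null until the target reports an event
    if (ev == null) {
      try { Thread.sleep(50); } catch (InterruptedException ie) { /* retry */ }
    }
  }
  if (ev.getType() == DebugEvent.Type.BREAKPOINT) {
    System.out.println("Hit breakpoint at " + ev.getPC());
  }
  dbg.debugEventContinue();           // backs up the PC for breakpoints we own (see below)
}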
-
-  // Following constants taken from winnt.h
-  private static final int EXCEPTION_DEBUG_EVENT  = 1;
-  private static final int LOAD_DLL_DEBUG_EVENT   = 6;
-  private static final int UNLOAD_DLL_DEBUG_EVENT = 7;
-  private static final int EXCEPTION_ACCESS_VIOLATION = 0xC0000005;
-  private static final int EXCEPTION_BREAKPOINT       = 0x80000003;
-  private static final int EXCEPTION_SINGLE_STEP      = 0x80000004;
-
-  public synchronized DebugEvent debugEventPoll() throws DebuggerException {
-    if (curDebugEvent != null) {
-      return curDebugEvent;
-    }
-
-    try {
-      printlnToOutput("pollevent");
-      if (!in.parseBoolean()) {
-        return null;
-      }
-      // Otherwise, got a debug event. Need to figure out what kind it is.
-      int handle = (int) in.parseAddress();
-      ThreadProxy thread = new Win32Thread(this, handle);
-      int code = in.parseInt();
-      DebugEvent ev = null;
-      switch (code) {
-      case LOAD_DLL_DEBUG_EVENT: {
-        Address addr = newAddress(in.parseAddress());
-        ev = BasicDebugEvent.newLoadObjectLoadEvent(thread, addr);
-        break;
-      }
-
-      case UNLOAD_DLL_DEBUG_EVENT: {
-        Address addr = newAddress(in.parseAddress());
-        ev = BasicDebugEvent.newLoadObjectUnloadEvent(thread, addr);
-        break;
-      }
-
-      case EXCEPTION_DEBUG_EVENT: {
-        int exceptionCode = in.parseInt();
-        Address pc = newAddress(in.parseAddress());
-        switch (exceptionCode) {
-        case EXCEPTION_ACCESS_VIOLATION:
-          boolean wasWrite = in.parseBoolean();
-          Address addr = newAddress(in.parseAddress());
-          ev = BasicDebugEvent.newAccessViolationEvent(thread, pc, wasWrite, addr);
-          break;
-
-        case EXCEPTION_BREAKPOINT:
-          ev = BasicDebugEvent.newBreakpointEvent(thread, pc);
-          break;
-
-        case EXCEPTION_SINGLE_STEP:
-          ev = BasicDebugEvent.newSingleStepEvent(thread, pc);
-          break;
-
-        default:
-          ev = BasicDebugEvent.newUnknownEvent(thread,
-                                               "Exception 0x" + Integer.toHexString(exceptionCode) +
-                                               " at PC " + pc);
-          break;
-        }
-        break;
-      }
-
-      default:
-        ev = BasicDebugEvent.newUnknownEvent(thread,
-                                             "Debug event " + code + " occurred");
-        break;
-      }
-      if (Assert.ASSERTS_ENABLED) {
-        Assert.that(ev != null, "Must have created event");
-      }
-      curDebugEvent = ev;
-    } catch (IOException e) {
-      throw new DebuggerException(e);
-    }
-
-    return curDebugEvent;
-  }
-
-  public synchronized void debugEventContinue() throws DebuggerException {
-    if (curDebugEvent == null) {
-      throw new DebuggerException("No debug event pending");
-    }
-
-    try {
-      ///////////////////////////////////////////////////////////////////
-      //                                                               //
-      // FIXME: this **must** be modified to handle breakpoint events
-      // properly. Must temporarily remove the breakpoint and enable
-      // single-stepping mode (hiding those single-step events from
-      // the user unless they have been requested; currently there is
-      // no way to request single-step events; and it isn't clear how
-      // to enable them or how the hardware and/or OS typically
-      // supports them, i.e., are they on a per-process or per-thread
-      // level?) until the process steps past the breakpoint, then put
-      // the breakpoint back.
-      //                                                               //
-      ///////////////////////////////////////////////////////////////////
-
-      DebugEvent.Type t = curDebugEvent.getType();
-      boolean shouldPassOn = true;
-      if (t == DebugEvent.Type.BREAKPOINT) {
-        // FIXME: correct algorithm appears to be as follows:
-        //
-        // 1. Check to see whether we know about this breakpoint. If
-        // not, it's requested by the user's program and we should
-        // ignore it (not pass it on to the program).
-        //
-        // 2. Replace the original opcode.
-        //
-        // 3. Set single-stepping mode in the debug registers.
-        //
-        // 4. Back up the PC.
-        //
-        // 5. In debugEventPoll(), watch for a single-step event on
-        // this thread. When we get it, put the breakpoint back. Only
-        // deliver that single-step event if the user has requested
-        // single-step events (FIXME: must figure out whether they are
-        // per-thread or per-process, and also expose a way to turn
-        // them on.)
-
-        // To make breakpoints work for now, we will just back up the
-        // PC, which we have to do in order to not disrupt the program
-        // execution in case the user decides to disable the breakpoint.
-
-        if (breakpoints.get(new Long(getAddressValue(curDebugEvent.getPC()))) != null) {
-          System.err.println("Backing up PC due to breakpoint");
-          X86ThreadContext ctx = (X86ThreadContext) curDebugEvent.getThread().getContext();
-          ctx.setRegister(X86ThreadContext.EIP, ctx.getRegister(X86ThreadContext.EIP) - 1);
-          curDebugEvent.getThread().setContext(ctx);
-        } else {
-          System.err.println("Skipping back up of PC since I didn't know about this breakpoint");
-          System.err.println("Known breakpoints:");
-          for (Iterator iter = breakpoints.keySet().iterator(); iter.hasNext(); ) {
-            System.err.println("  0x" + Long.toHexString(((Long) iter.next()).longValue()));
-          }
-        }
-        shouldPassOn = false;
-      } else if (t == DebugEvent.Type.SINGLE_STEP) {
-        shouldPassOn = false;
-      }
-      // Other kinds of debug events are either ignored if passed on
-      // or probably should be passed on so the program exits
-      // FIXME: generate process exiting events (should be easy)
-
-      int val = (shouldPassOn ? 1 : 0);
-      printlnToOutput("continueevent " + val);
-      if (!in.parseBoolean()) {
-        throw new DebuggerException("Unknown error while attempting to continue past debug event");
-      }
-      curDebugEvent = null;
-    } catch (IOException e) {
-      throw new DebuggerException(e);
-    }
-  }
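// Sketch of the re-arm algorithm the FIXME above outlines, phrased in terms of
// methods that already exist in this class (not the shipped behavior; the
// per-thread bookkeeping is simplified to a single field, the suspended-state
// checks are ignored, and 0x100 is the standard x86 EFLAGS trap flag):

private Address pendingRearm;   // breakpoint to restore after the next single step

private void handleOwnBreakpoint(DebugEvent ev, Address addr) throws DebuggerException {
  clearBreakpoint(addr);                                            // step 2: restore the original opcode
  X86ThreadContext ctx = (X86ThreadContext) ev.getThread().getContext();
  ctx.setRegister(X86ThreadContext.EFL,
                  ctx.getRegister(X86ThreadContext.EFL) | 0x100);   // step 3: single-step mode
  ctx.setRegister(X86ThreadContext.EIP,
                  ctx.getRegister(X86ThreadContext.EIP) - 1);       // step 4: back up the PC
  ev.getThread().setContext(ctx);
  pendingRearm = addr;                                              // remembered for step 5
}

private void handleOwnSingleStep() throws DebuggerException {
  if (pendingRearm != null) {
    setBreakpoint(pendingRearm);                                    // step 5: put the breakpoint back
    pendingRearm = null;
  }
}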
-
-  //--------------------------------------------------------------------------------
-  // Address access
-  //
-
-  /** From the Debugger interface */
-  public long getAddressValue(Address addr) {
-    if (addr == null) return 0;
-    return ((Win32Address) addr).getValue();
-  }
-
-  /** From the Win32Debugger interface */
-  public Address newAddress(long value) {
-    if (value == 0) return null;
-    return new Win32Address(this, value);
-  }
-
-  //--------------------------------------------------------------------------------
-  // Internals only below this point
-  //
-
-  private String parseString() throws IOException {
-    int charSize = in.parseInt();
-    int numChars = in.parseInt();
-    in.skipByte();
-    String str;
-    if (charSize == 1) {
-      str = in.readByteString(numChars);
-    } else {
-      str = in.readCharString(numChars);
-    }
-    return str;
-  }
-
-  /** Looks up an address in the remote process's address space.
-      Returns 0 if symbol not found or upon error. Package private to
-      allow Win32DebuggerRemoteIntfImpl access. NOTE that this returns
-      a long instead of an Address because we do not want to serialize
-      Addresses. */
-  synchronized long lookupInProcess(String objectName, String symbol) {
-    // NOTE: this assumes that the process is suspended (which is probably
-    // a necessary assumption given that DLLs can be loaded/unloaded as
-    // the process runs). Should update documentation.
-    if (nameToDllMap == null) {
-      getLoadObjectList();
-    }
-    DLL dll = (DLL) nameToDllMap.get(objectName);
-    // The DLL can be null because we use this to search through known
-    // DLLs in HotSpotTypeDataBase (for example)
-    if (dll != null) {
-      Win32Address addr = (Win32Address) dll.lookupSymbol(symbol);
-      if (addr != null) {
-        return addr.getValue();
-      }
-    }
-    return 0;
-  }
-
-  /** This reads bytes from the remote process. */
-  public synchronized ReadResult readBytesFromProcess(long address, long numBytes)
-    throws UnmappedAddressException, DebuggerException {
-    try {
-      String cmd = "peek " + utils.addressValueToString(address) + " " + numBytes;
-      printlnToOutput(cmd);
-      while (in.readByte() != 'B') {
-      }
-      byte res = in.readByte();
-      if (res == 0) {
-        System.err.println("Failing command: " + cmd);
-        throw new DebuggerException("Read of remote process address space failed");
-      }
-      // NOTE: must read ALL of the data regardless of whether we need
-      // to throw an UnmappedAddressException. Otherwise will corrupt
-      // the input stream each time we have a failure. Not good. Do
-      // not want to risk "flushing" the input stream in case a huge
-      // read has a hangup in the middle and we leave data on the
-      // stream.
-      byte[] buf = new byte[(int) numBytes];
-      boolean bailOut = false;
-      long failureAddress = 0;
-      while (numBytes > 0) {
-        long len = in.readUnsignedInt();
-        boolean isMapped = (in.readByte() != 0);
-        if (!isMapped) {
-          if (!bailOut) {
-            bailOut = true;
-            failureAddress = address;
-          }
-        } else {
-          // This won't work if we have unmapped regions, but if we do
-          // then we're going to throw an exception anyway
-
-          // NOTE: there is a factor of 20 speed difference between
-          // these two ways of doing this read.
-          in.readBytes(buf, 0, (int) len);
-        }
-
-        // Do NOT do this:
-        //        for (int i = 0; i < (int) len; i++) {
-        //          buf[i] = in.readByte();
-        //        }
-
-        numBytes -= len;
-        address += len;
-      }
-      if (Assert.ASSERTS_ENABLED) {
-        Assert.that(numBytes == 0, "Bug in debug server's implementation of peek");
-      }
-      if (bailOut) {
-        return new ReadResult(failureAddress);
-      }
-      return new ReadResult(buf);
-    }
-    catch (IOException e) {
-      throw new DebuggerException(e);
-    }
-  }
-
-  /** Convenience routines */
-  private void printlnToOutput(String s) throws IOException {
-    out.println(s);
-    if (out.checkError()) {
-      throw new IOException("Error occurred while writing to debug server");
-    }
-  }
-
-  private void printToOutput(String s) throws IOException {
-    out.print(s);
-    if (out.checkError()) {
-      throw new IOException("Error occurred while writing to debug server");
-    }
-  }
-
-  private void writeIntToOutput(int val) throws IOException {
-    rawOut.writeInt(val);
-    rawOut.flush();
-  }
-
-  private void writeToOutput(byte[] buf, int off, int len) throws IOException {
-    rawOut.write(buf, off, len);
-    rawOut.flush();
-  }
-
-  /** Connects to the debug server, setting up out and in streams. */
-  private void connectToDebugServer() throws IOException {
-    // Try for a short period of time to connect to the debug server; time
-    // out with failure if we didn't succeed
-    debuggerSocket = null;
-    long endTime = System.currentTimeMillis() + SHORT_TIMEOUT;
-
-    while ((debuggerSocket == null) && (System.currentTimeMillis() < endTime)) {
-      try {
-        // FIXME: this does not work if we are on a DHCP machine which
-        // did not get an IP address this session. It appears to use
-        // an old cached address and the connection does not actually
-        // succeed. Must file a bug.
-        // debuggerSocket = new Socket(InetAddress.getLocalHost(), PORT);
-        debuggerSocket = new Socket(InetAddress.getByName("127.0.0.1"), PORT);
-        debuggerSocket.setTcpNoDelay(true);
-      }
-      catch (IOException e) {
-        // Swallow IO exceptions while attempting connection
-        debuggerSocket = null;
-        try {
-          // Don't swamp the CPU
-          Thread.sleep(750);
-        }
-        catch (InterruptedException ex) {
-        }
-      }
-    }
-
-    if (debuggerSocket == null) {
-      // Failed to connect because of timeout
-      throw new DebuggerException("Timed out while attempting to connect to debug server (please start SwDbgSrv.exe)");
-    }
-
-    out = new PrintWriter(new BufferedWriter(new OutputStreamWriter(debuggerSocket.getOutputStream(), "US-ASCII")), true);
-    rawOut = new DataOutputStream(new BufferedOutputStream(debuggerSocket.getOutputStream()));
-    in = new InputLexer(new BufferedInputStream(debuggerSocket.getInputStream()));
-  }
-
-  private DLL findDLLByName(String fullPathName) {
-    for (Iterator iter = loadObjects.iterator(); iter.hasNext(); ) {
-      DLL dll = (DLL) iter.next();
-      if (dll.getName().equals(fullPathName)) {
-        return dll;
-      }
-    }
-    return null;
-  }
-
-  private void reresolveLoadObjects() throws DebuggerException {
-    try {
-      // It is too expensive to throw away the loadobject list every
-      // time the process is suspended, largely because of debug
-      // information re-parsing. When we suspend the target process we
-      // instead fetch the list of loaded libraries in the target and
-      // see whether any loadobject needs to be thrown away (because it
-      // was unloaded) or invalidated (because it was unloaded and
-      // reloaded at a different target address). Note that we don't
-      // properly handle the case of a loaded DLL being unloaded,
-      // recompiled, and reloaded. We could handle this by keeping a
-      // time stamp.
-
-      if (loadObjects == null) {
-        return;
-      }
-
-      // Need to create new list since have to figure out which ones
-      // were unloaded
-      List newLoadObjects = new ArrayList();
-
-    // Get list of library names and base addresses
-      printlnToOutput("libinfo");
-      int numInfo = in.parseInt();
-
-      for (int i = 0; i < numInfo; i++) {
-        // NOTE: because Win32 is case insensitive, we standardize on
-        // lowercase file names.
-        String  fullPathName = parseString().toLowerCase();
-        Address base         = newAddress(in.parseAddress());
-
-        // Look for full path name in DLL list
-        DLL dll = findDLLByName(fullPathName);
-        boolean mustLoad = true;
-        if (dll != null) {
-          loadObjects.remove(dll);
-
-          // See whether base addresses match; otherwise, need to reload
-          if (AddressOps.equal(base, dll.getBase())) {
-            mustLoad = false;
-          }
-        }
-
-        if (mustLoad) {
-          // Create new DLL
-          File   file = new File(fullPathName);
-          long   size = file.length();
-          String name = file.getName();
-          dll  = new DLL(this, fullPathName, size, base);
-          nameToDllMap.put(name, dll);
-        }
-        newLoadObjects.add(dll);
-      }
-
-      // All remaining entries in loadObjects have to be removed from
-      // the nameToDllMap
-      for (Iterator dllIter = loadObjects.iterator(); dllIter.hasNext(); ) {
-        DLL dll = (DLL) dllIter.next();
-        for (Iterator iter = nameToDllMap.keySet().iterator(); iter.hasNext(); ) {
-          String name = (String) iter.next();
-          if (nameToDllMap.get(name) == dll) {
-            nameToDllMap.remove(name);
-            break;
-          }
-        }
-      }
-
-      loadObjects = newLoadObjects;
-    } catch (IOException e) {
-      loadObjects = null;
-      nameToDllMap = null;
-      throw new DebuggerException(e);
-    }
-  }
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32LDTEntry.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,100 +0,0 @@
-/*
- * Copyright (c) 2001, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.debugger.win32;
-
-import java.io.Serializable;
-
-/** Describes an LDT entry. (Some of the descriptions are taken
-    directly from Microsoft's documentation and are copyrighted by
-    Microsoft.) */
-
-class Win32LDTEntry implements Serializable {
-  private short limitLow;
-  private short baseLow;
-  private byte  baseMid;
-  private byte  flags1;
-  private byte  flags2;
-  private byte  baseHi;
-
-  private Win32LDTEntry() {}
-
-  public Win32LDTEntry(short limitLow,
-                       short baseLow,
-                       byte  baseMid,
-                       byte  flags1,
-                       byte  flags2,
-                       byte  baseHi) {
-    this.limitLow = limitLow;
-    this.baseLow  = baseLow;
-    this.baseMid  = baseMid;
-    this.flags1   = flags1;
-    this.flags2   = flags2;
-    this.baseHi   = baseHi;
-  }
-
-  /** Returns base address of segment */
-  public long  getBase()     { return ( (baseLow & 0xFFFF)       |
-                                       ((baseMid & 0xFF) << 16)  |
-                                       ((baseHi  & 0xFF) << 24)) & 0xFFFFFFFF; }
-
-  public short getLimitLow() { return limitLow; }
-  public short getBaseLow()  { return baseLow; }
-  public byte  getBaseMid()  { return baseMid; }
-  public byte  getBaseHi()   { return baseHi; }
-
-  // FIXME: must verify mask and shift are correct
-  /** Describes type of segment. See TYPE_ portion of {@link
-      sun.jvm.hotspot.debugger.win32.Win32LDTEntryConstants}. */
-  public int   getType()     { return (flags1 & 0x1F); }
-
-  // FIXME: verify mask and shift are correct
-  /** Privilege level of descriptor: 0 = most privileged, 3 = least privileged */
-  public int   getPrivilegeLevel() { return ((flags1 & 0x60) >> 5); }
-
-  // FIXME: verify mask is correct
-  /** Is segment present in physical memory? */
-  public boolean isSegmentPhysical() { return ((flags1 & 0x70) != 0); }
-
-  // FIXME: verify mask and shift are correct
-  /** High bits (16-19) of the address of the last byte of the segment */
-  public int getLimitHi() { return (flags2 & 0x0F); }
-
-  // FIXME: verify mask is correct
-  /** <P> Size of segment. If the segment is a data segment, this
-      member contains 1 if the segment is larger than 64 kilobytes (K)
-      or 0 if the segment is smaller than or equal to 64K. </P>
-
-      <P> If the segment is a code segment, this member contains 1 if
-      the segment is a code segment and runs with the default (native
-      mode) instruction set. This member contains 0 if the code
-      segment is an 80286 code segment and runs with 16-bit offsets
-      and the 80286-compatible instruction set. </P> */
-  public boolean isDefaultBig() { return ((flags2 & 0x40) != 0); }
-
-  // FIXME: verify mask is correct
-  /** Returns true if segment is page granular, false if byte
-      granular. */
-  public boolean isPageGranular() { return ((flags2 & 0x80) != 0); }
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32LDTEntryConstants.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2001, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.debugger.win32;
-
-/** Enumerates flags in Win32LDTEntry */
-
-interface Win32LDTEntryConstants {
-  // Types of segments
-  public static final int TYPE_READ_ONLY_DATA                      = 0;
-  public static final int TYPE_READ_WRITE_DATA                     = 1;
-  public static final int TYPE_UNUSED                              = 2;
-  public static final int TYPE_READ_WRITE_EXPAND_DOWN_DATA         = 3;
-  public static final int TYPE_EXECUTE_ONLY_CODE                   = 4;
-  public static final int TYPE_EXECUTABLE_READABLE_CODE            = 5;
-  public static final int TYPE_EXECUTE_ONLY_CONFORMING_CODE        = 6;
-  public static final int TYPE_EXECUTABLE_READABLE_CONFORMING_CODE = 7;
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32OopHandle.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.debugger.win32;
-
-import sun.jvm.hotspot.debugger.*;
-
-class Win32OopHandle extends Win32Address implements OopHandle {
-  Win32OopHandle(Win32Debugger debugger, long addr) {
-    super(debugger, addr);
-  }
-
-  public boolean equals(Object arg) {
-    if (arg == null) {
-      return false;
-    }
-
-    if (!(arg instanceof Win32OopHandle)) {
-      return false;
-    }
-
-    return (addr == ((Win32Address) arg).addr);
-  }
-
-  public Address    addOffsetTo       (long offset) throws UnsupportedOperationException {
-    throw new UnsupportedOperationException("addOffsetTo not applicable to OopHandles (interior object pointers not allowed)");
-  }
-
-  public Address    andWithMask(long mask) throws UnsupportedOperationException {
-    throw new UnsupportedOperationException("andWithMask not applicable to OopHandles (i.e., anything but C addresses)");
-  }
-
-  public Address    orWithMask(long mask) throws UnsupportedOperationException {
-    throw new UnsupportedOperationException("orWithMask not applicable to OopHandles (i.e., anything but C addresses)");
-  }
-
-  public Address    xorWithMask(long mask) throws UnsupportedOperationException {
-    throw new UnsupportedOperationException("xorWithMask not applicable to OopHandles (i.e., anything but C addresses)");
-  }
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32Thread.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,129 +0,0 @@
-/*
- * Copyright (c) 2001, 2004, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.debugger.win32;
-
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.x86.*;
-
-class Win32Thread implements ThreadProxy {
-  private Win32Debugger debugger;
-  private int           handle;
-  private boolean       mustDuplicate;
-  private boolean       gotID;
-  private int           id;
-
-  /** The address argument must be the address of the HANDLE of the
-      desired thread in the target process. */
-  Win32Thread(Win32Debugger debugger, Address addr) {
-    this.debugger = debugger;
-    // FIXME: size of data fetched here should be configurable.
-    // However, making it so would produce a dependency on the "types"
-    // package from the debugger package, which is not desired.
-    this.handle   = (int) addr.getCIntegerAt(0, 4, true);
-    // Thread handles in the target process must be duplicated before
-    // fetching their contexts
-    mustDuplicate = true;
-    gotID = false;
-  }
-
-  /** The integer argument must be the value of a HANDLE received from
-      the "threadlist" operation. */
-  Win32Thread(Win32Debugger debugger, long handle) {
-    this.debugger = debugger;
-    this.handle   = (int) handle;
-    mustDuplicate = false;
-    gotID         = false;
-  }
-
-  public ThreadContext getContext() throws IllegalThreadStateException {
-    if (!debugger.isSuspended()) {
-      throw new IllegalThreadStateException("Target process must be suspended");
-    }
-    long[] data = debugger.getThreadIntegerRegisterSet(handle, mustDuplicate);
-    Win32ThreadContext context = new Win32ThreadContext(debugger);
-    for (int i = 0; i < data.length; i++) {
-      context.setRegister(i, data[i]);
-    }
-    return context;
-  }
-
-  public boolean canSetContext() throws DebuggerException {
-    return true;
-  }
-
-  public void setContext(ThreadContext thrCtx)
-    throws IllegalThreadStateException, DebuggerException {
-    if (!debugger.isSuspended()) {
-      throw new IllegalThreadStateException("Target process must be suspended");
-    }
-    X86ThreadContext context = (X86ThreadContext) thrCtx;
-    long[] data = new long[X86ThreadContext.NPRGREG];
-    for (int i = 0; i < data.length; i++) {
-      data[i] = context.getRegister(i);
-    }
-    debugger.setThreadIntegerRegisterSet(handle, mustDuplicate, data);
-  }
-
-  public boolean equals(Object obj) {
-    if ((obj == null) || !(obj instanceof Win32Thread)) {
-      return false;
-    }
-
-    return (((Win32Thread) obj).getThreadID() == getThreadID());
-  }
-
-  public int hashCode() {
-    return getThreadID();
-  }
-
-  public String toString() {
-    return Integer.toString(getThreadID());
-  }
-
-  /** Retrieves the thread ID of this thread by examining the Thread
-      Information Block. */
-  private int getThreadID() {
-    if (!gotID) {
-      try {
-        // Get thread context
-        X86ThreadContext context = (X86ThreadContext) getContext();
-        // Get LDT entry for FS register
-        Win32LDTEntry ldt =
-          debugger.getThreadSelectorEntry(handle,
-                                          mustDuplicate,
-                                          (int) context.getRegister(X86ThreadContext.FS));
-        // Get base address of segment = Thread Environment Block (TEB)
-        Address teb = debugger.newAddress(ldt.getBase());
-        // Thread ID is at offset 0x24
-        id = (int) teb.getCIntegerAt(0x24, 4, true);
-        gotID = true;
-      } catch (AddressException e) {
-        throw new DebuggerException(e);
-      }
-    }
-
-    return id;
-  }
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32ThreadContext.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2001, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.debugger.win32;
-
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.x86.*;
-
-class Win32ThreadContext extends X86ThreadContext {
-  private Win32Debugger debugger;
-
-  public Win32ThreadContext(Win32Debugger debugger) {
-    super();
-    this.debugger = debugger;
-  }
-
-  public void setRegisterAsAddress(int index, Address value) {
-    setRegister(index, debugger.getAddressValue(value));
-  }
-
-  public Address getRegisterAsAddress(int index) {
-    return debugger.newAddress(getRegister(index));
-  }
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc_implementation.g1;
+
+import java.util.Iterator;
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.gc_interface.CollectedHeapName;
+import sun.jvm.hotspot.memory.MemRegion;
+import sun.jvm.hotspot.memory.SharedHeap;
+import sun.jvm.hotspot.memory.SpaceClosure;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObjectFactory;
+import sun.jvm.hotspot.types.AddressField;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+// Mirror class for G1CollectedHeap.
+
+public class G1CollectedHeap extends SharedHeap {
+    // HeapRegionSeq _seq;
+    static private long hrsFieldOffset;
+    // MemRegion _g1_committed;
+    static private long g1CommittedFieldOffset;
+    // size_t _summary_bytes_used;
+    static private CIntegerField summaryBytesUsedField;
+    // G1MonitoringSupport* _g1mm
+    static private AddressField g1mmField;
+
+    static {
+        VM.registerVMInitializedObserver(new Observer() {
+                public void update(Observable o, Object data) {
+                    initialize(VM.getVM().getTypeDataBase());
+                }
+            });
+    }
+
+    static private synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("G1CollectedHeap");
+
+        hrsFieldOffset = type.getField("_hrs").getOffset();
+        g1CommittedFieldOffset = type.getField("_g1_committed").getOffset();
+        summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
+        g1mmField = type.getAddressField("_g1mm");
+    }
+
+    public long capacity() {
+        Address g1CommittedAddr = addr.addOffsetTo(g1CommittedFieldOffset);
+        MemRegion g1_committed = new MemRegion(g1CommittedAddr);
+        return g1_committed.byteSize();
+    }
+
+    public long used() {
+        return summaryBytesUsedField.getValue(addr);
+    }
+
+    public long n_regions() {
+        return hrs().length();
+    }
+
+    private HeapRegionSeq hrs() {
+        Address hrsAddr = addr.addOffsetTo(hrsFieldOffset);
+        return (HeapRegionSeq) VMObjectFactory.newObject(HeapRegionSeq.class,
+                                                         hrsAddr);
+    }
+
+    public G1MonitoringSupport g1mm() {
+        Address g1mmAddr = g1mmField.getValue(addr);
+        return (G1MonitoringSupport) VMObjectFactory.newObject(G1MonitoringSupport.class, g1mmAddr);
+    }
+
+    private Iterator<HeapRegion> heapRegionIterator() {
+        return hrs().heapRegionIterator();
+    }
+
+    public void heapRegionIterate(SpaceClosure scl) {
+        Iterator<HeapRegion> iter = heapRegionIterator();
+        while (iter.hasNext()) {
+            HeapRegion hr = iter.next();
+            scl.doSpace(hr);
+        }
+    }
+
+    public CollectedHeapName kind() {
+        return CollectedHeapName.G1_COLLECTED_HEAP;
+    }
+
+    public G1CollectedHeap(Address addr) {
+        super(addr);
+    }
+}
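
For reference, the new mirror is reached through the existing SA entry points. A minimal,
illustrative sketch (it assumes an already-attached agent, the existing VM.getVM() and
Universe.heap() accessors, and a target VM running G1; the class name G1HeapSummary is
hypothetical):

    import sun.jvm.hotspot.gc_implementation.g1.G1CollectedHeap;
    import sun.jvm.hotspot.memory.Universe;
    import sun.jvm.hotspot.runtime.VM;

    public class G1HeapSummary {
        // Assumes VM.getVM() has been initialized by an SA attach.
        public static void printSummary() {
            Universe universe = VM.getVM().getUniverse();
            if (universe.heap() instanceof G1CollectedHeap) {
                G1CollectedHeap g1 = (G1CollectedHeap) universe.heap();
                System.out.println("capacity  = " + g1.capacity() + " bytes");
                System.out.println("used      = " + g1.used() + " bytes");
                System.out.println("n_regions = " + g1.n_regions());
            }
        }
    }
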
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1MonitoringSupport.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc_implementation.g1;
+
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+// Mirror class for G1MonitoringSupport.
+
+public class G1MonitoringSupport extends VMObject {
+    // size_t _eden_committed;
+    static private CIntegerField edenCommittedField;
+    // size_t _eden_used;
+    static private CIntegerField edenUsedField;
+    // size_t _survivor_committed;
+    static private CIntegerField survivorCommittedField;
+    // size_t _survivor_used;
+    static private CIntegerField survivorUsedField;
+    // size_t _old_committed;
+    static private CIntegerField oldCommittedField;
+    // size_t _old_used;
+    static private CIntegerField oldUsedField;
+
+    static {
+        VM.registerVMInitializedObserver(new Observer() {
+                public void update(Observable o, Object data) {
+                    initialize(VM.getVM().getTypeDataBase());
+                }
+            });
+    }
+
+    static private synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("G1MonitoringSupport");
+
+        edenCommittedField = type.getCIntegerField("_eden_committed");
+        edenUsedField = type.getCIntegerField("_eden_used");
+        survivorCommittedField = type.getCIntegerField("_survivor_committed");
+        survivorUsedField = type.getCIntegerField("_survivor_used");
+        oldCommittedField = type.getCIntegerField("_old_committed");
+        oldUsedField = type.getCIntegerField("_old_used");
+    }
+
+    public long edenCommitted() {
+        return edenCommittedField.getValue(addr);
+    }
+
+    public long edenUsed() {
+        return edenUsedField.getValue(addr);
+    }
+
+    public long survivorCommitted() {
+        return survivorCommittedField.getValue(addr);
+    }
+
+    public long survivorUsed() {
+        return survivorUsedField.getValue(addr);
+    }
+
+    public long oldCommitted() {
+        return oldCommittedField.getValue(addr);
+    }
+
+    public long oldUsed() {
+        return oldUsedField.getValue(addr);
+    }
+
+    public G1MonitoringSupport(Address addr) {
+        super(addr);
+    }
+}
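
Building on the previous sketch, the monitoring mirror can be read through g1mm() to break
committed and used space down by G1 memory pool (again a hedged sketch; G1MonitoringSummary is
a hypothetical helper, and the G1CollectedHeap argument is obtained as above):

    import sun.jvm.hotspot.gc_implementation.g1.G1CollectedHeap;
    import sun.jvm.hotspot.gc_implementation.g1.G1MonitoringSupport;

    public class G1MonitoringSummary {
        public static void printPools(G1CollectedHeap g1) {
            G1MonitoringSupport g1mm = g1.g1mm();
            System.out.println("eden     used/committed = " + g1mm.edenUsed() + "/" + g1mm.edenCommitted());
            System.out.println("survivor used/committed = " + g1mm.survivorUsed() + "/" + g1mm.survivorCommitted());
            System.out.println("old      used/committed = " + g1mm.oldUsed() + "/" + g1mm.oldCommitted());
        }
    }
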
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegion.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc_implementation.g1;
+
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.memory.ContiguousSpace;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+// Mirror class for HeapRegion. Currently we don't actually include
+// any of its fields but only iterate over it (which we get "for free"
+// as HeapRegion ultimately inherits from ContiguousSpace).
+
+public class HeapRegion extends ContiguousSpace {
+    // static int GrainBytes;
+    static private CIntegerField grainBytesField;
+
+    static {
+        VM.registerVMInitializedObserver(new Observer() {
+                public void update(Observable o, Object data) {
+                    initialize(VM.getVM().getTypeDataBase());
+                }
+            });
+    }
+
+    static private synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("HeapRegion");
+
+        grainBytesField = type.getCIntegerField("GrainBytes");
+    }
+
+    static public long grainBytes() {
+        return grainBytesField.getValue();
+    }
+
+    public HeapRegion(Address addr) {
+        super(addr);
+    }
+}
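
The static GrainBytes mirror allows a simple cross-check: committed bytes divided by the fixed
region size should agree with the region count held by the HeapRegionSeq mirror. A small sketch
under the same setup assumptions as above (RegionCountCheck is a hypothetical helper):

    import sun.jvm.hotspot.gc_implementation.g1.G1CollectedHeap;
    import sun.jvm.hotspot.gc_implementation.g1.HeapRegion;

    public class RegionCountCheck {
        public static void check(G1CollectedHeap g1) {
            long regionSize = HeapRegion.grainBytes();
            // Number of committed regions implied by _g1_committed.
            long fromCommitted = g1.capacity() / regionSize;
            System.out.println("regions (from capacity) = " + fromCommitted);
            System.out.println("regions (from _hrs)     = " + g1.n_regions());
        }
    }
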
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc_implementation.g1;
+
+import java.util.Iterator;
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.runtime.VMObjectFactory;
+import sun.jvm.hotspot.types.AddressField;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+// Mirror class for HeapRegionSeq. It's essentially an index -> HeapRegion map.
+
+public class HeapRegionSeq extends VMObject {
+    // HeapRegion** _regions;
+    static private AddressField regionsField;
+    // size_t _length;
+    static private CIntegerField lengthField;
+
+    static {
+        VM.registerVMInitializedObserver(new Observer() {
+                public void update(Observable o, Object data) {
+                    initialize(VM.getVM().getTypeDataBase());
+                }
+            });
+    }
+
+    static private synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("HeapRegionSeq");
+
+        regionsField = type.getAddressField("_regions");
+        lengthField = type.getCIntegerField("_length");
+    }
+
+    private HeapRegion at(long index) {
+        Address arrayAddr = regionsField.getValue(addr);
+        // Offset of &_regions[index]
+        long offset = index * VM.getVM().getAddressSize();
+        Address regionAddr = arrayAddr.getAddressAt(offset);
+        return (HeapRegion) VMObjectFactory.newObject(HeapRegion.class,
+                                                      regionAddr);
+    }
+
+    public long length() {
+        return lengthField.getValue(addr);
+    }
+
+    private class HeapRegionIterator implements Iterator<HeapRegion> {
+        private long index;
+        private long length;
+
+        @Override
+        public boolean hasNext() { return index < length; }
+
+        @Override
+        public HeapRegion next() { return at(index++);    }
+
+        @Override
+        public void remove()     { /* not supported */    }
+
+        HeapRegionIterator(Address addr) {
+            index = 0;
+            length = length();
+        }
+    }
+
+    public Iterator<HeapRegion> heapRegionIterator() {
+        return new HeapRegionIterator(addr);
+    }
+
+    public HeapRegionSeq(Address addr) {
+        super(addr);
+    }
+}
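
The iterator above is what G1CollectedHeap.heapRegionIterate() walks. A hedged usage sketch,
assuming Space exposes used() as elsewhere in the SA memory package (RegionWalk is a
hypothetical helper):

    import sun.jvm.hotspot.gc_implementation.g1.G1CollectedHeap;
    import sun.jvm.hotspot.memory.Space;
    import sun.jvm.hotspot.memory.SpaceClosure;

    public class RegionWalk {
        // Sums per-region used bytes by walking every HeapRegion in the sequence.
        public static long usedByRegionWalk(G1CollectedHeap g1) {
            final long[] total = { 0 };
            g1.heapRegionIterate(new SpaceClosure() {
                public void doSpace(Space hr) {
                    // Each HeapRegion is a ContiguousSpace, so used() is inherited.
                    total[0] += hr.used();
                }
            });
            return total[0];
        }
    }
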
--- a/agent/src/share/classes/sun/jvm/hotspot/gc_interface/CollectedHeapName.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_interface/CollectedHeapName.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,7 @@
   public static final CollectedHeapName ABSTRACT = new CollectedHeapName("abstract");
   public static final CollectedHeapName SHARED_HEAP = new CollectedHeapName("SharedHeap");
   public static final CollectedHeapName GEN_COLLECTED_HEAP = new CollectedHeapName("GenCollectedHeap");
+  public static final CollectedHeapName G1_COLLECTED_HEAP = new CollectedHeapName("G1CollectedHeap");
   public static final CollectedHeapName PARALLEL_SCAVENGE_HEAP = new CollectedHeapName("ParallelScavengeHeap");
 
   public String toString() {
--- a/agent/src/share/classes/sun/jvm/hotspot/interpreter/Bytecode.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/interpreter/Bytecode.java	Thu Dec 22 15:46:11 2011 +0000
@@ -26,6 +26,7 @@
 
 import sun.jvm.hotspot.oops.*;
 import sun.jvm.hotspot.utilities.*;
+import sun.jvm.hotspot.runtime.VM;
 
 public class Bytecode {
   Method method;
@@ -45,6 +46,23 @@
     return Bits.roundTo(bci + offset, jintSize) - bci;
   }
 
+  public int     getIndexU1()               { return method.getBytecodeOrBPAt(bci() + 1) & 0xFF; }
+  public int     getIndexU2(int bc, boolean isWide) {
+    if (can_use_native_byte_order(bc, isWide)) {
+      return method.getNativeShortArg(bci() + (isWide ? 2 : 1)) & 0xFFFF;
+    }
+    return method.getBytecodeShortArg(bci() + (isWide ? 2 : 1)) & 0xFFFF;
+  }
+  public int     getIndexU4()               { return method.getNativeIntArg(bci() + 1); }
+  public boolean hasIndexU4()               { return code() == Bytecodes._invokedynamic; }
+
+  public int     getIndexU1Cpcache()        { return method.getBytecodeOrBPAt(bci() + 1) & 0xFF; }
+  public int     getIndexU2Cpcache()        { return method.getNativeShortArg(bci() + 1) & 0xFFFF; }
+
+  static boolean can_use_native_byte_order(int bc, boolean is_wide) {
+    return (VM.getVM().isBigEndian() || Bytecodes.native_byte_order(bc /*, is_wide*/));
+  }
+
   int javaSignedWordAt(int offset) {
     return method.getBytecodeIntArg(bci + offset);
   }
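
The new getIndexU2() path exists because the Rewriter stores constant pool cache indices in the
target's native byte order, while Java-level indices keep class-file (big-endian) order. A
standalone illustration of that distinction (the byte values and helper names are illustrative
only, not part of the SA API):

    public class IndexByteOrder {
        // Class-file (big-endian) order: high byte first.
        static int javaOrder(byte hi, byte lo) {
            return ((hi & 0xFF) << 8) | (lo & 0xFF);
        }

        // Native order on a little-endian target such as x86: low byte first.
        static int nativeOrderLE(byte b0, byte b1) {
            return ((b1 & 0xFF) << 8) | (b0 & 0xFF);
        }

        public static void main(String[] args) {
            byte b0 = 0x12, b1 = 0x34;
            System.out.println("class-file order: " + javaOrder(b0, b1));     // 0x1234 = 4660
            System.out.println("little-endian   : " + nativeOrderLE(b0, b1)); // 0x3412 = 13330
        }
    }
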
--- a/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeFastAAccess0.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.interpreter;
-
-import sun.jvm.hotspot.oops.*;
-import sun.jvm.hotspot.utilities.*;
-
-public class BytecodeFastAAccess0 extends BytecodeGetPut {
-  BytecodeFastAAccess0(Method method, int bci) {
-    super(method, bci);
-  }
-
-  public int index() {
-    return (int) (0xFF & javaShortAt(2));
-  }
-
-  public boolean isStatic() {
-    return false;
-  }
-
-  public void verify() {
-    if (Assert.ASSERTS_ENABLED) {
-      Assert.that(isValid(), "check fast_aaccess_0");
-    }
-  }
-
-  public boolean isValid() {
-    return code() == Bytecodes._fast_aaccess_0;
-  }
-
-  public static BytecodeFastAAccess0 at(Method method, int bci) {
-    BytecodeFastAAccess0 b = new BytecodeFastAAccess0(method, bci);
-    if (Assert.ASSERTS_ENABLED) {
-      b.verify();
-    }
-    return b;
-  }
-
-  /** Like at, but returns null if the BCI is not at fast_aaccess_0  */
-  public static BytecodeFastAAccess0 atCheck(Method method, int bci) {
-    BytecodeFastAAccess0 b = new BytecodeFastAAccess0(method, bci);
-    return (b.isValid() ? b : null);
-  }
-
-  public static BytecodeFastAAccess0 at(BytecodeStream bcs) {
-    return new BytecodeFastAAccess0(bcs.method(), bcs.bci());
-  }
-
-  public String toString() {
-    StringBuffer buf = new StringBuffer();
-    buf.append("aload_0");
-    buf.append(spaces);
-    buf.append(super.toString());
-    return buf.toString();
-  }
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeFastIAccess0.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.interpreter;
-
-import sun.jvm.hotspot.oops.*;
-import sun.jvm.hotspot.utilities.*;
-
-public class BytecodeFastIAccess0 extends BytecodeGetPut {
-  BytecodeFastIAccess0(Method method, int bci) {
-    super(method, bci);
-  }
-
-  public int index() {
-    return (int) (0xFF & javaShortAt(2));
-  }
-
-  public boolean isStatic() {
-    return false;
-  }
-
-  public void verify() {
-    if (Assert.ASSERTS_ENABLED) {
-      Assert.that(isValid(), "check fast_iaccess_0");
-    }
-  }
-
-  public boolean isValid() {
-    return code() == Bytecodes._fast_iaccess_0;
-  }
-
-  public static BytecodeFastIAccess0 at(Method method, int bci) {
-    BytecodeFastIAccess0 b = new BytecodeFastIAccess0(method, bci);
-    if (Assert.ASSERTS_ENABLED) {
-      b.verify();
-    }
-    return b;
-  }
-
-  /** Like at, but returns null if the BCI is not at fast_iaccess_0  */
-  public static BytecodeFastIAccess0 atCheck(Method method, int bci) {
-    BytecodeFastIAccess0 b = new BytecodeFastIAccess0(method, bci);
-    return (b.isValid() ? b : null);
-  }
-
-  public static BytecodeFastIAccess0 at(BytecodeStream bcs) {
-    return new BytecodeFastIAccess0(bcs.method(), bcs.bci());
-  }
-
-  public String toString() {
-    StringBuffer buf = new StringBuffer();
-    buf.append("aload_0");
-    buf.append(spaces);
-    buf.append(super.toString());
-    return buf.toString();
-  }
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeLoadConstant.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeLoadConstant.java	Thu Dec 22 15:46:11 2011 +0000
@@ -28,29 +28,25 @@
 import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.utilities.*;
 
-public class BytecodeLoadConstant extends BytecodeWithCPIndex {
+public class BytecodeLoadConstant extends Bytecode {
   BytecodeLoadConstant(Method method, int bci) {
     super(method, bci);
   }
 
   public boolean hasCacheIndex() {
     // normal ldc uses CP index, but fast_aldc uses swapped CP cache index
-    return javaCode() != code();
+    return code() >= Bytecodes.number_of_java_codes;
   }
 
-  public int index() {
-    int i = javaCode() == Bytecodes._ldc ?
-                 (int) (0xFF & javaByteAt(1))
-               : (int) (0xFFFF & javaShortAt(1));
-    if (hasCacheIndex()) {
-      return (0xFFFF & VM.getVM().getBytes().swapShort((short) i));
-    } else {
-      return i;
-    }
+  int rawIndex() {
+    if (javaCode() == Bytecodes._ldc)
+      return getIndexU1();
+    else
+      return getIndexU2(code(), false);
   }
 
   public int poolIndex() {
-    int i = index();
+    int i = rawIndex();
     if (hasCacheIndex()) {
       ConstantPoolCache cpCache = method().getConstants().getCache();
       return cpCache.getEntryAt(i).getConstantPoolIndex();
@@ -61,12 +57,18 @@
 
   public int cacheIndex() {
     if (hasCacheIndex()) {
-      return index();
+      return rawIndex();
     } else {
       return -1;  // no cache index
     }
   }
 
+  public BasicType resultType() {
+    int index = poolIndex();
+    ConstantTag tag = method().getConstants().getTagAt(index);
+    return tag.basicType();
+  }
+
   private Oop getCachedConstant() {
     int i = cacheIndex();
     if (i >= 0) {
@@ -88,7 +90,7 @@
            jcode == Bytecodes._ldc2_w;
     if (! codeOk) return false;
 
-    ConstantTag ctag = method().getConstants().getTagAt(index());
+    ConstantTag ctag = method().getConstants().getTagAt(poolIndex());
     if (jcode == Bytecodes._ldc2_w) {
        // has to be double or long
        return (ctag.isDouble() || ctag.isLong()) ? true: false;
@@ -107,7 +109,7 @@
        return false;
     }
 
-    ConstantTag ctag = method().getConstants().getTagAt(index());
+    ConstantTag ctag = method().getConstants().getTagAt(poolIndex());
     return ctag.isKlass() || ctag.isUnresolvedKlass();
   }
 
@@ -120,7 +122,7 @@
     // We just look at the object at the corresponding index and
     // decide based on the oop type.
     ConstantPool cpool = method().getConstants();
-    int cpIndex = index();
+    int cpIndex = poolIndex();
     ConstantPool.CPSlot oop = cpool.getSlotAt(cpIndex);
     if (oop.isOop()) {
       return (Klass) oop.getOop();
--- a/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeStream.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeStream.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2002, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -130,7 +130,13 @@
   public int     getIndex()           { return (isWide())
                                           ? (_method.getBytecodeShortArg(bci() + 2) & 0xFFFF)
                                           : (_method.getBytecodeOrBPAt(bci() + 1) & 0xFF); }
-  public int     getIndexBig()        { return _method.getBytecodeShortArg(bci() + 1); }
+  public int     getIndexU1()         { return _method.getBytecodeOrBPAt(bci() + 1) & 0xFF; }
+  public int     getIndexU2()         { return _method.getBytecodeShortArg(bci() + 1) & 0xFFFF; }
+  public int     getIndexU4()         { return _method.getNativeIntArg(bci() + 1); }
+  public boolean hasIndexU4()         { return code() == Bytecodes._invokedynamic; }
+
+  public int     getIndexU1Cpcache()         { return _method.getBytecodeOrBPAt(bci() + 1) & 0xFF; }
+  public int     getIndexU2Cpcache()         { return _method.getNativeShortArg(bci() + 1) & 0xFFFF; }
 
   // Fetch at absolute BCI (for manual parsing of certain bytecodes)
   public int     codeAt(int bci) {
--- a/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeWideable.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeWideable.java	Thu Dec 22 15:46:11 2011 +0000
@@ -38,7 +38,6 @@
 
   // the local variable index
   public int getLocalVarIndex() {
-    return (isWide()) ? (int) (0xFFFF & javaShortAt(1))
-            : (int) (0xFF & javaByteAt(1));
+    return (isWide()) ? getIndexU2(code(), true) : getIndexU1();
   }
 }
--- a/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeWithCPIndex.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeWithCPIndex.java	Thu Dec 22 15:46:11 2011 +0000
@@ -35,7 +35,7 @@
   }
 
   // the constant pool index for this bytecode
-  public int index() { return 0xFFFF & javaShortAt(1); }
+  public int index() { return getIndexU2(code(), false); }
 
   public int getSecondaryIndex() {
      throw new IllegalArgumentException("must be invokedynamic");
--- a/agent/src/share/classes/sun/jvm/hotspot/interpreter/Bytecodes.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/interpreter/Bytecodes.java	Thu Dec 22 15:46:11 2011 +0000
@@ -276,6 +276,34 @@
 
   public static final int number_of_codes       = 233;
 
+  // Flag bits derived from format strings, can_trap, can_rewrite, etc.:
+  // semantic flags:
+  static final int  _bc_can_trap      = 1<<0;     // bytecode execution can trap or block
+  static final int  _bc_can_rewrite   = 1<<1;     // bytecode execution has an alternate form
+
+  // format bits (determined only by the format string):
+  static final int  _fmt_has_c        = 1<<2;     // constant, such as sipush "bcc"
+  static final int  _fmt_has_j        = 1<<3;     // constant pool cache index, such as getfield "bjj"
+  static final int  _fmt_has_k        = 1<<4;     // constant pool index, such as ldc "bk"
+  static final int  _fmt_has_i        = 1<<5;     // local index, such as iload
+  static final int  _fmt_has_o        = 1<<6;     // offset, such as ifeq
+  static final int  _fmt_has_nbo      = 1<<7;     // contains native-order field(s)
+  static final int  _fmt_has_u2       = 1<<8;     // contains double-byte field(s)
+  static final int  _fmt_has_u4       = 1<<9;     // contains quad-byte field
+  static final int  _fmt_not_variable = 1<<10;    // not of variable length (simple or wide)
+  static final int  _fmt_not_simple   = 1<<11;    // either wide or variable length
+  static final int  _all_fmt_bits     = (_fmt_not_simple*2 - _fmt_has_c);
+
+  // Example derived format syndromes:
+  static final int  _fmt_b      = _fmt_not_variable;
+  static final int  _fmt_bc     = _fmt_b | _fmt_has_c;
+  static final int  _fmt_bi     = _fmt_b | _fmt_has_i;
+  static final int  _fmt_bkk    = _fmt_b | _fmt_has_k | _fmt_has_u2;
+  static final int  _fmt_bJJ    = _fmt_b | _fmt_has_j | _fmt_has_u2 | _fmt_has_nbo;
+  static final int  _fmt_bo2    = _fmt_b | _fmt_has_o | _fmt_has_u2;
+  static final int  _fmt_bo4    = _fmt_b | _fmt_has_o | _fmt_has_u4;
+
+
   public static int specialLengthAt(Method method, int bci) {
     int code = codeAt(method, bci);
     switch (code) {
@@ -337,18 +365,20 @@
   //   static Code       non_breakpoint_code_at(address bcp, methodOop method = null);
 
   // Bytecode attributes
-  public static boolean   isDefined    (int code) { return 0 <= code && code < number_of_codes && _format[code] != null; }
-  public static boolean   wideIsDefined(int code) { return isDefined(code) && _wide_format[code] != null; }
+  public static boolean   isDefined    (int code) { return 0 <= code && code < number_of_codes && flags(code, false) != 0; }
+  public static boolean   wideIsDefined(int code) { return isDefined(code) && flags(code, true) != 0; }
   public static String    name         (int code) { check(code);      return _name          [code]; }
   public static String    format       (int code) { check(code);      return _format        [code]; }
   public static String    wideFormat   (int code) { wideCheck(code);  return _wide_format   [code]; }
   public static int       resultType   (int code) { check(code);      return _result_type   [code]; }
   public static int       depth        (int code) { check(code);      return _depth         [code]; }
-  public static int       lengthFor    (int code) { check(code);      return _length        [code]; }
-  public static boolean   canTrap      (int code) { check(code);      return _can_trap      [code]; }
+  public static int       lengthFor    (int code) { check(code);      return _lengths       [code] & 0xF; }
+  public static int       wideLengthFor(int code) { check(code);      return _lengths       [code] >> 4; }
+  public static boolean   canTrap      (int code) { check(code);      return has_all_flags(code, _bc_can_trap, false); }
   public static int       javaCode     (int code) { check(code);      return _java_code     [code]; }
-  public static boolean   canRewrite   (int code) { check(code);      return _can_rewrite   [code]; }
-  public static int       wideLengthFor(int code) { wideCheck(code);  return wideFormat(code).length(); }
+  public static boolean   canRewrite   (int code) { check(code);      return has_all_flags(code, _bc_can_rewrite, false); }
+  public static boolean   native_byte_order(int code)  { check(code);      return has_all_flags(code, _fmt_has_nbo, false); }
+  public static boolean   uses_cp_cache  (int code)    { check(code);      return has_all_flags(code, _fmt_has_j, false); }
   public static int       lengthAt     (Method method, int bci) { int l = lengthFor(codeAt(method, bci)); return l > 0 ? l : specialLengthAt(method, bci); }
   public static int       javaLengthAt (Method method, int bci) { int l = lengthFor(javaCode(codeAt(method, bci))); return l > 0 ? l : specialLengthAt(method, bci); }
   public static boolean   isJavaCode   (int code) { return 0 <= code && code < number_of_java_codes; }
@@ -362,6 +392,92 @@
   public static boolean   isZeroConst  (int code) { return (code == _aconst_null || code == _iconst_0
                                                                                  || code == _fconst_0 || code == _dconst_0); }
 
+  static int         flags          (int code, boolean is_wide) {
+    assert code == (code & 0xff) : "must be a byte";
+    return _flags[code + (is_wide ? 256 : 0)];
+  }
+  static int         format_bits    (int code, boolean is_wide) { return flags(code, is_wide) & _all_fmt_bits; }
+  static boolean     has_all_flags  (int code, int test_flags, boolean is_wide) {
+    return (flags(code, is_wide) & test_flags) == test_flags;
+  }
+
+  static char compute_flags(String format) {
+    return compute_flags(format, 0);
+  }
+  static char compute_flags(String format, int more_flags) {
+    if (format == null)  return 0;  // not even more_flags
+    int flags = more_flags;
+    int fp = 0;
+    if (format.length() == 0) {
+      flags |= _fmt_not_simple; // but variable
+    } else {
+      switch (format.charAt(fp)) {
+      case 'b':
+        flags |= _fmt_not_variable;  // but simple
+        ++fp;  // skip 'b'
+        break;
+      case 'w':
+        flags |= _fmt_not_variable | _fmt_not_simple;
+        ++fp;  // skip 'w'
+        assert(format.charAt(fp) == 'b') : "wide format must start with 'wb'";
+        ++fp;  // skip 'b'
+        break;
+      }
+    }
+
+    boolean has_nbo = false, has_jbo = false;
+    int has_size = 0;
+    while (fp < format.length()) {
+      int this_flag = 0;
+      char fc = format.charAt(fp++);
+      switch (fc) {
+      case '_': continue;         // ignore these
+
+      case 'j': this_flag = _fmt_has_j; has_jbo = true; break;
+      case 'k': this_flag = _fmt_has_k; has_jbo = true; break;
+      case 'i': this_flag = _fmt_has_i; has_jbo = true; break;
+      case 'c': this_flag = _fmt_has_c; has_jbo = true; break;
+      case 'o': this_flag = _fmt_has_o; has_jbo = true; break;
+
+        // uppercase versions mark native byte order (from Rewriter)
+        // actually, only the 'J' case happens currently
+      case 'J': this_flag = _fmt_has_j; has_nbo = true; break;
+      case 'K': this_flag = _fmt_has_k; has_nbo = true; break;
+      case 'I': this_flag = _fmt_has_i; has_nbo = true; break;
+      case 'C': this_flag = _fmt_has_c; has_nbo = true; break;
+      case 'O': this_flag = _fmt_has_o; has_nbo = true; break;
+      default:  assert false : "bad char in format";
+      }
+
+      flags |= this_flag;
+
+      assert !(has_jbo && has_nbo) : "mixed byte orders in format";
+      if (has_nbo)
+        flags |= _fmt_has_nbo;
+
+      int this_size = 1;
+      if (fp < format.length() && format.charAt(fp) == fc) {
+        // advance beyond run of the same characters
+        this_size = 2;
+        while (fp  + 1 < format.length() && format.charAt(++fp) == fc)  this_size++;
+        switch (this_size) {
+        case 2: flags |= _fmt_has_u2; break;
+        case 4: flags |= _fmt_has_u4; break;
+        default: assert false : "bad rep count in format";
+        }
+      }
+      assert has_size == 0 ||                     // no field yet
+        this_size == has_size ||             // same size
+        this_size < has_size && fp == format.length() : // last field can be short
+             "mixed field sizes in format";
+      has_size = this_size;
+    }
+
+    assert flags == (char)flags : "change _format_flags";
+    return (char)flags;
+  }
+
+
   //----------------------------------------------------------------------
   // Internals only below this point
   //
@@ -371,10 +487,9 @@
   private static String[]    _wide_format;
   private static int[]       _result_type;
   private static byte[]      _depth;
-  private static byte[]      _length;
-  private static boolean[]   _can_trap;
+  private static byte[]      _lengths;
   private static int[]       _java_code;
-  private static boolean[]   _can_rewrite;
+  private static char[]      _flags;
 
   static {
     _name           = new String [number_of_codes];
@@ -382,10 +497,9 @@
     _wide_format    = new String [number_of_codes];
     _result_type    = new int    [number_of_codes]; // See BasicType.java
     _depth          = new byte   [number_of_codes];
-    _length         = new byte   [number_of_codes];
-    _can_trap       = new boolean[number_of_codes];
+    _lengths        = new byte   [number_of_codes];
     _java_code      = new int    [number_of_codes];
-    _can_rewrite    = new boolean[number_of_codes];
+    _flags          = new char[256 * 2]; // all second page for wide formats
 
     // In case we want to fetch this information from the VM in the
     // future
@@ -712,18 +826,19 @@
     if (Assert.ASSERTS_ENABLED) {
       Assert.that(wide_format == null || format != null, "short form must exist if there's a wide form");
     }
+    int len  = (format      != null ? format.length()      : 0);
+    int wlen = (wide_format != null ? wide_format.length() : 0);
     _name          [code] = name;
-    _format        [code] = format;
-    _wide_format   [code] = wide_format;
     _result_type   [code] = result_type;
     _depth         [code] = (byte) depth;
-    _can_trap      [code] = can_trap;
-    _length        [code] = (byte) (format != null ? format.length() : 0);
+    _lengths       [code] = (byte)((wlen << 4) | (len & 0xF));
     _java_code     [code] = java_code;
-    if (java_code != code) {
-      _can_rewrite[java_code] = true;
-    } else {
-      _can_rewrite[java_code] = false;
-    }
+    _format        [code] = format;
+    _wide_format   [code] = wide_format;
+    int bc_flags = 0;
+    if (can_trap)           bc_flags |= _bc_can_trap;
+    if (java_code != code)  bc_flags |= _bc_can_rewrite;
+    _flags[code+0*256] = compute_flags(format,      bc_flags);
+    _flags[code+1*256] = compute_flags(wide_format, bc_flags);
   }
 }
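The def(...) method above also folds each bytecode's normal and wide lengths into one byte of the new _lengths table, wide length in the high nibble and normal length in the low nibble, replacing the old one-byte-per-entry _length array. A small standalone sketch of that packing and the matching lookups (plain Java, not the SA API):

    // Nibble packing as in _lengths[code] = (byte)((wlen << 4) | (len & 0xF)).
    class LengthPackingSketch {
        static byte pack(int len, int wlen)    { return (byte) ((wlen << 4) | (len & 0xF)); }
        static int  lengthFor(byte packed)     { return packed & 0xF; }          // low nibble
        static int  wideLengthFor(byte packed) { return (packed >> 4) & 0xF; }   // high nibble

        public static void main(String[] args) {
            byte packed = pack(2, 4);   // e.g. an iload-style entry: 2-byte form, 4-byte wide form
            System.out.println(lengthFor(packed));      // 2
            System.out.println(wideLengthFor(packed));  // 4
        }
    }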
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/ReferenceTypeImpl.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/ReferenceTypeImpl.java	Thu Dec 22 15:46:11 2011 +0000
@@ -28,11 +28,13 @@
 
 import com.sun.jdi.*;
 
+import sun.jvm.hotspot.memory.SystemDictionary;
 import sun.jvm.hotspot.oops.Instance;
 import sun.jvm.hotspot.oops.InstanceKlass;
 import sun.jvm.hotspot.oops.ArrayKlass;
 import sun.jvm.hotspot.oops.JVMDIClassStatus;
 import sun.jvm.hotspot.oops.Klass;
+import sun.jvm.hotspot.oops.ObjArray;
 import sun.jvm.hotspot.oops.Oop;
 import sun.jvm.hotspot.oops.Symbol;
 import sun.jvm.hotspot.oops.DefaultHeapVisitor;
@@ -53,6 +55,7 @@
     private SoftReference methodsCache;
     private SoftReference allMethodsCache;
     private SoftReference nestedTypesCache;
+    private SoftReference methodInvokesCache;
 
     /* to mark when no info available */
     static final SDE NO_SDE_INFO_MARK = new SDE();
@@ -82,6 +85,27 @@
                 return method;
             }
         }
+        if (ref.getMethodHolder().equals(SystemDictionary.getMethodHandleKlass())) {
+          // invoke methods are generated as needed, so make mirrors as needed
+          List mis = null;
+          if (methodInvokesCache == null) {
+            mis = new ArrayList();
+            methodInvokesCache = new SoftReference(mis);
+          } else {
+            mis = (List)methodInvokesCache.get();
+          }
+          it = mis.iterator();
+          while (it.hasNext()) {
+            MethodImpl method = (MethodImpl)it.next();
+            if (ref.equals(method.ref())) {
+              return method;
+            }
+          }
+
+          MethodImpl method = MethodImpl.createMethodImpl(vm, this, ref);
+          mis.add(method);
+          return method;
+        }
         throw new IllegalArgumentException("Invalid method id: " + ref);
     }
 
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/SADebugServer.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/SADebugServer.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,14 +51,6 @@
          usage();
       }
 
-      // By default, SA agent classes prefer dbx debugger to proc debugger
-      // and Windows process debugger to windbg debugger. SA expects
-      // special properties to be set to choose other debuggers. For SA/JDI,
-      // we choose proc, windbg debuggers instead of the defaults.
-
-      System.setProperty("sun.jvm.hotspot.debugger.useProcDebugger", "true");
-      System.setProperty("sun.jvm.hotspot.debugger.useWindbgDebugger", "true");
-
       // delegate to the actual SA debug server.
       sun.jvm.hotspot.DebugServer.main(args);
    }
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/StackFrameImpl.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/StackFrameImpl.java	Thu Dec 22 15:46:11 2011 +0000
@@ -123,6 +123,9 @@
                 Assert.that(values.size() > 0, "this is missing");
             }
             // 'this' at index 0.
+            if (values.get(0).getType() == BasicType.getTConflict()) {
+              return null;
+            }
             OopHandle handle = values.oopHandleAt(0);
             ObjectHeap heap = vm.saObjectHeap();
             thisObject = vm.objectMirror(heap.newOop(handle));
@@ -210,6 +213,8 @@
         validateStackFrame();
         StackValueCollection values = saFrame.getLocals();
         MethodImpl mmm = (MethodImpl)location.method();
+        if (mmm.isNative())
+            return null;
         List argSigs = mmm.argumentSignatures();
         int count = argSigs.size();
         List res = new ArrayList(0);
@@ -231,34 +236,67 @@
         ValueImpl valueImpl = null;
         OopHandle handle = null;
         ObjectHeap heap = vm.saObjectHeap();
-        if (variableType == BasicType.T_BOOLEAN) {
+        if (values.get(ss).getType() == BasicType.getTConflict()) {
+          // Dead locals, so just represent them as a zero of the appropriate type
+          if (variableType == BasicType.T_BOOLEAN) {
+            valueImpl = (BooleanValueImpl) vm.mirrorOf(false);
+          } else if (variableType == BasicType.T_CHAR) {
+            valueImpl = (CharValueImpl) vm.mirrorOf((char)0);
+          } else if (variableType == BasicType.T_FLOAT) {
+            valueImpl = (FloatValueImpl) vm.mirrorOf((float)0);
+          } else if (variableType == BasicType.T_DOUBLE) {
+            valueImpl = (DoubleValueImpl) vm.mirrorOf((double)0);
+          } else if (variableType == BasicType.T_BYTE) {
+            valueImpl = (ByteValueImpl) vm.mirrorOf((byte)0);
+          } else if (variableType == BasicType.T_SHORT) {
+            valueImpl = (ShortValueImpl) vm.mirrorOf((short)0);
+          } else if (variableType == BasicType.T_INT) {
+            valueImpl = (IntegerValueImpl) vm.mirrorOf((int)0);
+          } else if (variableType == BasicType.T_LONG) {
+            valueImpl = (LongValueImpl) vm.mirrorOf((long)0);
+          } else if (variableType == BasicType.T_OBJECT) {
+            // we may have an [Ljava/lang/Object; - i.e., an Object[] whose
+            // elements may themselves be arrays, because every array is an Object.
+            handle = null;
+            valueImpl = (ObjectReferenceImpl) vm.objectMirror(heap.newOop(handle));
+          } else if (variableType == BasicType.T_ARRAY) {
+            handle = null;
+            valueImpl = vm.arrayMirror((Array)heap.newOop(handle));
+          } else if (variableType == BasicType.T_VOID) {
+            valueImpl = new VoidValueImpl(vm);
+          } else {
+            throw new RuntimeException("Should not read here");
+          }
+        } else {
+          if (variableType == BasicType.T_BOOLEAN) {
             valueImpl = (BooleanValueImpl) vm.mirrorOf(values.booleanAt(ss));
-        } else if (variableType == BasicType.T_CHAR) {
+          } else if (variableType == BasicType.T_CHAR) {
             valueImpl = (CharValueImpl) vm.mirrorOf(values.charAt(ss));
-        } else if (variableType == BasicType.T_FLOAT) {
+          } else if (variableType == BasicType.T_FLOAT) {
             valueImpl = (FloatValueImpl) vm.mirrorOf(values.floatAt(ss));
-        } else if (variableType == BasicType.T_DOUBLE) {
+          } else if (variableType == BasicType.T_DOUBLE) {
             valueImpl = (DoubleValueImpl) vm.mirrorOf(values.doubleAt(ss));
-        } else if (variableType == BasicType.T_BYTE) {
+          } else if (variableType == BasicType.T_BYTE) {
             valueImpl = (ByteValueImpl) vm.mirrorOf(values.byteAt(ss));
-        } else if (variableType == BasicType.T_SHORT) {
+          } else if (variableType == BasicType.T_SHORT) {
             valueImpl = (ShortValueImpl) vm.mirrorOf(values.shortAt(ss));
-        } else if (variableType == BasicType.T_INT) {
+          } else if (variableType == BasicType.T_INT) {
             valueImpl = (IntegerValueImpl) vm.mirrorOf(values.intAt(ss));
-        } else if (variableType == BasicType.T_LONG) {
+          } else if (variableType == BasicType.T_LONG) {
             valueImpl = (LongValueImpl) vm.mirrorOf(values.longAt(ss));
-        } else if (variableType == BasicType.T_OBJECT) {
+          } else if (variableType == BasicType.T_OBJECT) {
             // we may have an [Ljava/lang/Object; - i.e., an Object[] whose
             // elements may themselves be arrays, because every array is an Object.
             handle = values.oopHandleAt(ss);
             valueImpl = (ObjectReferenceImpl) vm.objectMirror(heap.newOop(handle));
-        } else if (variableType == BasicType.T_ARRAY) {
+          } else if (variableType == BasicType.T_ARRAY) {
             handle = values.oopHandleAt(ss);
             valueImpl = vm.arrayMirror((Array)heap.newOop(handle));
-        } else if (variableType == BasicType.T_VOID) {
+          } else if (variableType == BasicType.T_VOID) {
             valueImpl = new VoidValueImpl(vm);
-        } else {
+          } else {
             throw new RuntimeException("Should not read here");
+          }
         }
 
         return valueImpl;
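When getLocals() reports a slot as T_CONFLICT, the code above treats it as a dead local and substitutes a zero or null mirror of the declared type instead of reading the raw slot. The same idea, reduced to plain Java values rather than JDI mirrors (the type tags and defaults below are illustrative assumptions):

    // Default ("zero") value per field-descriptor tag, mirroring the dead-local
    // branch structure above; not the JDI mirror API.
    final class DefaultValueSketch {
        static Object defaultFor(char typeTag) {
            switch (typeTag) {
                case 'Z': return Boolean.FALSE;
                case 'C': return (char) 0;
                case 'F': return 0.0f;
                case 'D': return 0.0d;
                case 'B': return (byte) 0;
                case 'S': return (short) 0;
                case 'I': return 0;
                case 'J': return 0L;
                case 'L':                // object reference
                case '[': return null;   // array reference
                default:  throw new IllegalArgumentException("bad type tag: " + typeTag);
            }
        }
    }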
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/VirtualMachineImpl.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/VirtualMachineImpl.java	Thu Dec 22 15:46:11 2011 +0000
@@ -263,14 +263,6 @@
                                             this.hashCode() + "]");
 
         ((com.sun.tools.jdi.VirtualMachineManagerImpl)mgr).addVirtualMachine(this);
-
-        // By default SA agent classes prefer dbx debugger to proc debugger
-        // and Windows process debugger to windbg debugger. SA expects
-        // special properties to be set to choose other debuggers. We will set
-        // those here before attaching to SA agent.
-
-        System.setProperty("sun.jvm.hotspot.debugger.useProcDebugger", "true");
-        System.setProperty("sun.jvm.hotspot.debugger.useWindbgDebugger", "true");
     }
 
     // we reflectively use newly spec'ed class because our ALT_BOOTDIR
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/SystemDictionary.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/SystemDictionary.java	Thu Dec 22 15:46:11 2011 +0000
@@ -44,6 +44,7 @@
   private static sun.jvm.hotspot.types.OopField systemKlassField;
   private static sun.jvm.hotspot.types.OopField threadKlassField;
   private static sun.jvm.hotspot.types.OopField threadGroupKlassField;
+  private static sun.jvm.hotspot.types.OopField methodHandleKlassField;
 
   static {
     VM.registerVMInitializedObserver(new Observer() {
@@ -69,6 +70,7 @@
     systemKlassField = type.getOopField(WK_KLASS("System_klass"));
     threadKlassField = type.getOopField(WK_KLASS("Thread_klass"));
     threadGroupKlassField = type.getOopField(WK_KLASS("ThreadGroup_klass"));
+    methodHandleKlassField = type.getOopField(WK_KLASS("MethodHandle_klass"));
   }
 
   // These WK functions must follow the definitions in systemDictionary.hpp:
@@ -127,6 +129,10 @@
     return (InstanceKlass) newOop(systemKlassField.getValue());
   }
 
+  public static InstanceKlass getMethodHandleKlass() {
+    return (InstanceKlass) newOop(methodHandleKlassField.getValue());
+  }
+
   public InstanceKlass getAbstractOwnableSynchronizerKlass() {
     return (InstanceKlass) find("java/util/concurrent/locks/AbstractOwnableSynchronizer",
                                 null, null);
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
 import java.util.*;
 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.gc_interface.*;
+import sun.jvm.hotspot.gc_implementation.g1.G1CollectedHeap;
 import sun.jvm.hotspot.gc_implementation.parallelScavenge.*;
 import sun.jvm.hotspot.oops.*;
 import sun.jvm.hotspot.types.*;
@@ -72,6 +73,7 @@
     heapConstructor = new VirtualConstructor(db);
     heapConstructor.addMapping("GenCollectedHeap", GenCollectedHeap.class);
     heapConstructor.addMapping("ParallelScavengeHeap", ParallelScavengeHeap.class);
+    heapConstructor.addMapping("G1CollectedHeap", G1CollectedHeap.class);
 
     mainThreadGroupField   = type.getOopField("_main_thread_group");
     systemThreadGroupField = type.getOopField("_system_thread_group");
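Adding the G1CollectedHeap mapping lets the VirtualConstructor wrap a G1 heap in the target VM with the matching SA class; the mechanism is essentially a name-to-class factory map keyed by the VM type name. A reduced sketch of that idea (hypothetical class; the SA's VirtualConstructor also hands the heap's address to the wrapper it builds):

    import java.util.HashMap;
    import java.util.Map;

    class VirtualConstructorSketch {
        private final Map<String, Class<?>> mappings = new HashMap<>();

        void addMapping(String vmTypeName, Class<?> wrapper) {
            mappings.put(vmTypeName, wrapper);
        }

        Object instantiateWrapperFor(String vmTypeName) throws ReflectiveOperationException {
            Class<?> wrapper = mappings.get(vmTypeName);
            if (wrapper == null) {
                throw new IllegalArgumentException("no mapping for " + vmTypeName);
            }
            return wrapper.getDeclaredConstructor().newInstance();
        }
    }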
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ArrayData.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * SUN PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ */
+
+package sun.jvm.hotspot.oops;
+
+import java.io.*;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.utilities.*;
+
+// ArrayData
+//
+// An ArrayData is a base class for accessing profiling data that does
+// not have a statically known size.  It consists of an array length
+// and an array start.
+abstract class ArrayData extends ProfileData {
+
+  static final int arrayLenOffSet = 0;
+  static final int arrayStartOffSet = 1;
+
+  int arrayUintAt(int index) {
+    int aindex = index + arrayStartOffSet;
+    return uintAt(aindex);
+  }
+  int arrayIntAt(int index) {
+    int aindex = index + arrayStartOffSet;
+    return intAt(aindex);
+  }
+  Oop arrayOopAt(int index) {
+    int aindex = index + arrayStartOffSet;
+    return oopAt(aindex);
+  }
+
+  // Code generation support for subclasses.
+  static int arrayElementOffset(int index) {
+    return cellOffset(arrayStartOffSet + index);
+  }
+
+  ArrayData(DataLayout layout) {
+    super(layout);
+  }
+
+  static int staticCellCount() {
+    return -1;
+  }
+
+  int arrayLen() {
+    return intAt(arrayLenOffSet);
+  }
+
+  public int cellCount() {
+    return arrayLen() + 1;
+  }
+
+  // Code generation support
+  static int arrayLenOffset() {
+    return cellOffset(arrayLenOffSet);
+  }
+  static int arrayStartOffset() {
+    return cellOffset(arrayStartOffSet);
+  }
+
+}
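ArrayData above models a variable-length tail: cell 0 holds the array length and the data cells start at cell 1, so element i sits at cellOffset(arrayStartOffSet + i) past the one-cell header. A small arithmetic sketch of those offsets, assuming an 8-byte cell (the real size is MethodData.cellSize):

    // Offset arithmetic for the ArrayData layout above, under an assumed 8-byte cell.
    class ArrayDataOffsetSketch {
        static final int CELL = 8;                                            // assumption for the sketch
        static int cellOffset(int index)     { return CELL + index * CELL; }  // skip one header cell
        static int arrayLenOffset()          { return cellOffset(0); }        // length cell
        static int arrayElementOffset(int i) { return cellOffset(1 + i); }    // data cells

        public static void main(String[] args) {
            System.out.println(arrayLenOffset());         // 8
            System.out.println(arrayElementOffset(0));    // 16
            System.out.println(arrayElementOffset(3));    // 40
        }
    }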
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/BitData.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.oops;
+
+import java.io.*;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.utilities.*;
+
+// BitData
+//
+// A BitData holds a flag or two in its header.
+public class BitData extends ProfileData {
+
+  // nullSeen:
+  //  saw a null operand (cast/aastore/instanceof)
+  static final int nullSeenFlag              = DataLayout.firstFlag + 0;
+  static final int bitCellCount = 0;
+
+  public BitData(DataLayout layout) {
+    super(layout);
+  }
+
+  static int staticCellCount() {
+    return bitCellCount;
+  }
+
+  public int cellCount() {
+    return staticCellCount();
+  }
+
+  // Accessor
+
+  // The nullSeen flag bit is specially known to the interpreter.
+  // Consulting it allows the compiler to avoid setting up nullCheck traps.
+  boolean nullSeen()     { return flagAt(nullSeenFlag); }
+
+  // Code generation support
+  // static int nullSeenByteConstant() {
+  //   return flagNumberToByteConstant(nullSeenFlag);
+  // }
+
+  static int bitDataSize() {
+    return cellOffset(bitCellCount);
+  }
+
+  public void printDataOn(PrintStream st) {
+    printShared(st, "BitData");
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/BranchData.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * SUN PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ */
+
+package sun.jvm.hotspot.oops;
+
+import java.io.*;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.utilities.*;
+
+// BranchData
+//
+// A BranchData is used to access profiling data for a two-way branch.
+// It consists of taken and notTaken counts as well as a data displacement
+// for the taken case.
+public class BranchData extends JumpData {
+
+  static final int notTakenOffSet = jumpCellCount;
+  static final int branchCellCount = notTakenOffSet + 1;
+
+  public BranchData(DataLayout layout) {
+    super(layout);
+    //assert(layout.tag() == DataLayout.branchDataTag, "wrong type");
+  }
+
+  static int staticCellCount() {
+    return branchCellCount;
+  }
+
+  public int cellCount() {
+    return staticCellCount();
+  }
+
+  // Direct accessor
+  int notTaken() {
+    return uintAt(notTakenOffSet);
+  }
+
+  // Code generation support
+  static int notTakenOffset() {
+    return cellOffset(notTakenOffSet);
+  }
+  static int branchDataSize() {
+    return cellOffset(branchCellCount);
+  }
+
+  public void printDataOn(PrintStream st) {
+    printShared(st, "BranchData");
+    st.println("taken(" + taken() + ") displacement(" + displacement() + ")");
+    tab(st);
+    st.println("not taken(" + notTaken() + ")");
+  }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/CIntField.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/CIntField.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,6 +41,9 @@
   public long getValue(Oop obj) {
     return obj.getHandle().getCIntegerAt(getOffset(), size, isUnsigned);
   }
+  public long getValue(Address addr) {
+    return addr.getCIntegerAt(getOffset(), size, isUnsigned);
+  }
   public void setValue(Oop obj, long value) throws MutationException {
     // Fix this: set* missing in Address
   }
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/ConstMethod.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ConstMethod.java	Thu Dec 22 15:46:11 2011 +0000
@@ -164,6 +164,18 @@
     return (short) ((hi << 8) | lo);
   }
 
+  /** Fetches a 16-bit native ordered value from the
+      bytecode stream */
+  public short getNativeShortArg(int bci) {
+    int hi = getBytecodeOrBPAt(bci);
+    int lo = getBytecodeOrBPAt(bci + 1);
+    if (VM.getVM().isBigEndian()) {
+        return (short) ((hi << 8) | lo);
+    } else {
+        return (short) ((lo << 8) | hi);
+    }
+  }
+
   /** Fetches a 32-bit big-endian ("Java ordered") value from the
       bytecode stream */
   public int getBytecodeIntArg(int bci) {
@@ -175,6 +187,21 @@
     return (b4 << 24) | (b3 << 16) | (b2 << 8) | b1;
   }
 
+  /** Fetches a 32-bit native ordered value from the
+      bytecode stream */
+  public int getNativeIntArg(int bci) {
+    int b4 = getBytecodeOrBPAt(bci);
+    int b3 = getBytecodeOrBPAt(bci + 1);
+    int b2 = getBytecodeOrBPAt(bci + 2);
+    int b1 = getBytecodeOrBPAt(bci + 3);
+
+    if (VM.getVM().isBigEndian()) {
+        return (b4 << 24) | (b3 << 16) | (b2 << 8) | b1;
+    } else {
+        return (b1 << 24) | (b2 << 16) | (b3 << 8) | b4;
+    }
+  }
+
   public byte[] getByteCode() {
      byte[] bc = new byte[ (int) getCodeSize() ];
      for( int i=0; i < bc.length; i++ )
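getNativeShortArg and getNativeIntArg above reassemble operands that the Rewriter stored in the target VM's native byte order, so on a little-endian target the bytes combine in the opposite order from the big-endian "Java ordered" accessors. A standalone sketch of the two assembly orders over plain ints (illustrative, not the SA's bytecode accessors):

    // Same four stream bytes, two different ints depending on the target's endianness.
    class ByteOrderSketch {
        static int assemble(int b0, int b1, int b2, int b3, boolean bigEndian) {
            return bigEndian
                ? (b0 << 24) | (b1 << 16) | (b2 << 8) | b3    // "Java ordered" (big-endian)
                : (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;   // native little-endian order
        }

        public static void main(String[] args) {
            // bytes as they appear at bci .. bci+3 in the bytecode stream
            System.out.printf("%08x%n", assemble(0x12, 0x34, 0x56, 0x78, true));   // 12345678
            System.out.printf("%08x%n", assemble(0x12, 0x34, 0x56, 0x78, false));  // 78563412
        }
    }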
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Thu Dec 22 15:46:11 2011 +0000
@@ -212,13 +212,60 @@
   }
 
   public Symbol getNameRefAt(int which) {
-    int nameIndex = getNameAndTypeAt(getNameAndTypeRefIndexAt(which))[0];
-    return getSymbolAt(nameIndex);
+    return implGetNameRefAt(which, false);
+  }
+
+  private Symbol implGetNameRefAt(int which, boolean uncached) {
+    int signatureIndex = getNameRefIndexAt(implNameAndTypeRefIndexAt(which, uncached));
+    return getSymbolAt(signatureIndex);
   }
 
   public Symbol getSignatureRefAt(int which) {
-    int sigIndex = getNameAndTypeAt(getNameAndTypeRefIndexAt(which))[1];
-    return getSymbolAt(sigIndex);
+    return implGetSignatureRefAt(which, false);
+  }
+
+  private Symbol implGetSignatureRefAt(int which, boolean uncached) {
+    int signatureIndex = getSignatureRefIndexAt(implNameAndTypeRefIndexAt(which, uncached));
+    return getSymbolAt(signatureIndex);
+  }
+
+
+  private int implNameAndTypeRefIndexAt(int which, boolean uncached) {
+    int i = which;
+    if (!uncached && getCache() != null) {
+      if (ConstantPoolCache.isSecondaryIndex(which)) {
+        // Invokedynamic index.
+        int pool_index = getCache().getMainEntryAt(which).getConstantPoolIndex();
+        pool_index = invokeDynamicNameAndTypeRefIndexAt(pool_index);
+        // assert(tagAt(pool_index).isNameAndType(), "");
+        return pool_index;
+      }
+      // change byte-ordering and go via cache
+      i = remapInstructionOperandFromCache(which);
+    } else {
+      if (getTagAt(which).isInvokeDynamic()) {
+        int pool_index = invokeDynamicNameAndTypeRefIndexAt(which);
+        // assert(tag_at(pool_index).is_name_and_type(), "");
+        return pool_index;
+      }
+    }
+    // assert(tag_at(i).is_field_or_method(), "Corrupted constant pool");
+    // assert(!tag_at(i).is_invoke_dynamic(), "Must be handled above");
+    int ref_index = getIntAt(i);
+    return extractHighShortFromInt(ref_index);
+  }
+
+  private int remapInstructionOperandFromCache(int operand) {
+    int cpc_index = operand;
+    // DEBUG_ONLY(cpc_index -= CPCACHE_INDEX_TAG);
+    // assert((int)(u2)cpc_index == cpc_index, "clean u2");
+    int member_index = getCache().getEntryAt(cpc_index).getConstantPoolIndex();
+    return member_index;
+  }
+
+  int invokeDynamicNameAndTypeRefIndexAt(int which) {
+    // assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool");
+    return extractHighShortFromInt(getIntAt(which));
   }
 
   // returns null, if not resolved.
@@ -253,15 +300,7 @@
   }
 
   public int getNameAndTypeRefIndexAt(int index) {
-    int refIndex = getFieldOrMethodAt(index);
-    if (DEBUG) {
-      System.err.println("ConstantPool.getNameAndTypeRefIndexAt(" + index + "): refIndex = " + refIndex);
-    }
-    int i = extractHighShortFromInt(refIndex);
-    if (DEBUG) {
-      System.err.println("ConstantPool.getNameAndTypeRefIndexAt(" + index + "): result = " + i);
-    }
-    return i;
+    return implNameAndTypeRefIndexAt(index, false);
   }
 
   /** Lookup for entries consisting of (name_index, signature_index) */
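implNameAndTypeRefIndexAt above either remaps the operand through the constant pool cache or, for invokedynamic, reads the NameAndType index directly; in both paths the index is pulled from the high 16 bits of the stored constant pool word via extractHighShortFromInt. A sketch of that extraction, assuming a straightforward packing with the NameAndType index in the high half (the SA helper additionally accounts for the target VM's byte order):

    // A field/method ref entry packed as one 32-bit word: high short = name-and-type
    // index, low short = class index (packing assumed for illustration).
    class RefIndexSketch {
        static int highShort(int refWord) { return (refWord >>> 16) & 0xFFFF; }
        static int lowShort(int refWord)  { return refWord & 0xFFFF; }

        public static void main(String[] args) {
            int refWord = (0x0042 << 16) | 0x0007;       // nameAndType = 0x42, class = 0x07
            System.out.println(highShort(refWord));      // 66
            System.out.println(lowShort(refWord));       // 7
        }
    }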
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCache.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCache.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -72,9 +72,7 @@
   }
 
   public ConstantPoolCacheEntry getEntryAt(int i) {
-    if (Assert.ASSERTS_ENABLED) {
-      Assert.that(0 <= i && i < getLength(), "index out of bounds");
-    }
+    if (i < 0 || i >= getLength()) throw new IndexOutOfBoundsException(i + " " + getLength());
     return new ConstantPoolCacheEntry(this, i);
   }
 
@@ -84,21 +82,27 @@
 
   // secondary entries hold invokedynamic call site bindings
   public ConstantPoolCacheEntry getSecondaryEntryAt(int i) {
-    ConstantPoolCacheEntry e = new ConstantPoolCacheEntry(this, decodeSecondaryIndex(i));
+    int rawIndex = i;
+    if (isSecondaryIndex(i)) {
+      rawIndex = decodeSecondaryIndex(i);
+    }
+    ConstantPoolCacheEntry e = getEntryAt(rawIndex);
     if (Assert.ASSERTS_ENABLED) {
-      Assert.that(e.isSecondaryEntry(), "must be a secondary entry");
+      Assert.that(e.isSecondaryEntry(), "must be a secondary entry:" + rawIndex);
     }
     return e;
   }
 
   public ConstantPoolCacheEntry getMainEntryAt(int i) {
+    int primaryIndex = i;
     if (isSecondaryIndex(i)) {
       // run through an extra level of indirection:
-      i = getSecondaryEntryAt(i).getMainEntryIndex();
+      int rawIndex = decodeSecondaryIndex(i);
+      primaryIndex = getEntryAt(rawIndex).getMainEntryIndex();
     }
-    ConstantPoolCacheEntry e = new ConstantPoolCacheEntry(this, i);
+    ConstantPoolCacheEntry e = getEntryAt(primaryIndex);
     if (Assert.ASSERTS_ENABLED) {
-      Assert.that(!e.isSecondaryEntry(), "must not be a secondary entry");
+      Assert.that(!e.isSecondaryEntry(), "must not be a secondary entry:" + primaryIndex);
     }
     return e;
   }
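getMainEntryAt and getSecondaryEntryAt above distinguish primary cache indices from encoded secondary (invokedynamic) indices, decode the latter to a raw slot, and only then do the bounds-checked getEntryAt lookup. A hedged sketch of one plausible encoding, assuming secondary indices are stored as the bitwise complement of the raw slot so they read as negative values (the real convention lives in isSecondaryIndex/decodeSecondaryIndex and is not restated here):

    // Assumed complement encoding for secondary indices; illustrative only.
    class SecondaryIndexSketch {
        static int     encodeSecondaryIndex(int raw) { return ~raw; }
        static boolean isSecondaryIndex(int i)       { return i < 0; }
        static int     decodeSecondaryIndex(int i)   { return ~i; }

        public static void main(String[] args) {
            int encoded = encodeSecondaryIndex(5);
            System.out.println(isSecondaryIndex(encoded));     // true
            System.out.println(decodeSecondaryIndex(encoded)); // 5
        }
    }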
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/CounterData.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * SUN PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ */
+
+package sun.jvm.hotspot.oops;
+
+import java.io.*;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.utilities.*;
+
+// CounterData
+//
+// A CounterData corresponds to a simple counter.
+public class CounterData extends BitData {
+
+  static final int countOff = 0;
+  static final int counterCellCount = 1;
+
+  public CounterData(DataLayout layout) {
+    super(layout);
+  }
+
+  static int staticCellCount() {
+    return counterCellCount;
+  }
+
+  public int cellCount() {
+    return staticCellCount();
+  }
+
+  // Direct accessor
+  int count() {
+    return uintAt(countOff);
+  }
+
+  // Code generation support
+  static int countOffset() {
+    return cellOffset(countOff);
+  }
+  static int counterDataSize() {
+    return cellOffset(counterCellCount);
+  }
+
+  public void printDataOn(PrintStream st) {
+    printShared(st, "CounterData");
+    st.println("count(" + count() + ")");
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/DataLayout.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * SUN PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ */
+
+package sun.jvm.hotspot.oops;
+
+import java.io.*;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.utilities.*;
+
+public class DataLayout {
+  public static final int noTag = 0;
+  public static final int bitDataTag = 1;
+  public static final int counterDataTag = 2;
+  public static final int jumpDataTag= 3;
+  public static final int receiverTypeDataTag = 4;
+  public static final int virtualCallDataTag = 5;
+  public static final int retDataTag = 6;
+  public static final int branchDataTag = 7;
+  public static final int multiBranchDataTag = 8;
+
+  // The _struct._flags word is formatted as [trapState:4 | flags:4].
+  // The trap state breaks down further as [recompile:1 | reason:3].
+  // This further breakdown is defined in deoptimization.cpp.
+  // See Deoptimization.trapStateReason for an assert that
+  // trapBits is big enough to hold reasons < reasonRecordedLimit.
+  //
+  // The trapState is collected only if ProfileTraps is true.
+  public static final int trapBits = 1+3;  // 3: enough to distinguish [0..reasonRecordedLimit].
+  public static final int trapShift = 8 - trapBits;
+  public static final int trapMask = Bits.rightNBits(trapBits);
+  public static final int trapMaskInPlace = (trapMask << trapShift);
+  public static final int flagLimit = trapShift;
+  public static final int flagMask = Bits.rightNBits(flagLimit);
+  public static final int firstFlag = 0;
+
+  private Address data;
+
+  private int offset;
+
+  private boolean handlized;
+
+  public DataLayout(MethodData d, int o) {
+    data = d.getHandle();
+    offset = o;
+  }
+
+  public DataLayout(Address d, int o) {
+    data = d;
+    offset = o;
+    handlized = true;
+  }
+
+  public int dp() { return offset; }
+
+  private int getU11(int at) {
+    return data.getJByteAt(offset + at) & 0xff;
+  }
+
+  private int getU22(int at) {
+    return data.getJShortAt(offset + at) & 0xffff;
+  }
+
+  int cellAt(int index) {
+    // Cells are intptr_t sized but only contain ints as raw values
+    return (int)data.getCIntegerAt(offset + cellOffset(index), MethodData.cellSize, false);
+  }
+
+  Oop oopAt(int index) {
+    OopHandle handle;
+    if (handlized) {
+      throw new InternalError("unsupported");
+    }
+    handle = data.getOopHandleAt(offset + cellOffset(index));
+    return VM.getVM().getObjectHeap().newOop(handle);
+  }
+
+  public Address addressAt(int index) {
+    OopHandle handle;
+    if (handlized) {
+      return data.getAddressAt(offset + cellOffset(index));
+    } else {
+      return data.getOopHandleAt(offset + cellOffset(index));
+    }
+  }
+
+  // Every data layout begins with a header.  This header
+  // contains a tag, which is used to indicate the size/layout
+  // of the data, 4 bits of flags, which can be used in any way,
+  // 4 bits of trap history (none/one reason/many reasons),
+  // and a bci, which is used to tie this piece of data to a
+  // specific bci in the bytecodes.
+  // union {
+  //   intptrT _bits;
+  //   struct {
+  //     u1 _tag;
+  //     u1 _flags;
+  //     u2 _bci;
+  //   } _struct;
+  // } _header;
+
+  // Some types of data layouts need a length field.
+  static boolean needsArrayLen(int tag) {
+    return (tag == multiBranchDataTag);
+  }
+
+  public static final int counterIncrement = 1;
+
+  // Size computation
+  static int headerSizeInBytes() {
+    return MethodData.cellSize;
+  }
+  static int headerSizeInCells() {
+    return 1;
+  }
+
+  static int computeSizeInBytes(int cellCount) {
+    return headerSizeInBytes() + cellCount * MethodData.cellSize;
+  }
+
+  // Initialization
+  // void initialize(int tag, int bci, int cellCount);
+
+  // Accessors
+  public int tag() {
+    return getU11(0);
+  }
+
+  // Return a few bits of trap state.  Range is [0..trapMask].
+  // The state tells if traps with zero, one, or many reasons have occurred.
+  // It also tells whether zero or many recompilations have occurred.
+  // The associated trap histogram in the MDO itself tells whether
+  // traps are common or not.  If a BCI shows that a trap X has
+  // occurred, and the MDO shows N occurrences of X, we make the
+  // simplifying assumption that all N occurrences can be blamed
+  // on that BCI.
+  int trapState() {
+    return (flags() >> trapShift) & trapMask;
+  }
+
+  int flags() {
+    return getU11(1);
+  }
+
+  int bci() {
+    return getU22(2);
+  }
+
+  boolean flagAt(int flagNumber) {
+    // assert(flagNumber < flagLimit, "oob");
+    return (flags() & (0x1 << flagNumber)) != 0;
+  }
+
+  // Low-level support for code generation.
+  static int headerOffset() {
+    return 0;
+  }
+  static int tagOffset() {
+    return 0;
+  }
+  static int flagsOffset() {
+    return 1;
+  }
+  static int bciOffset() {
+    return 2;
+  }
+  public static int cellOffset(int index) {
+    return MethodData.cellSize + index * MethodData.cellSize;
+  }
+  // // Return a value which, when or-ed as a byte into _flags, sets the flag.
+  // static int flagNumberToByteConstant(int flagNumber) {
+  //   assert(0 <= flagNumber && flagNumber < flagLimit, "oob");
+  //   DataLayout temp; temp.setHeader(0);
+  //   temp.setFlagAt(flagNumber);
+  //   return temp._header._struct._flags;
+  // }
+  // // Return a value which, when or-ed as a word into _header, sets the flag.
+  // static intptrT flagMaskToHeaderMask(int byteConstant) {
+  //   DataLayout temp; temp.setHeader(0);
+  //   temp._header._struct._flags = byteConstant;
+  //   return temp._header._bits;
+  // }
+}
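The header byte described above splits into four trap-state bits above four general flag bits, so trapState() shifts the flags byte right by trapShift and masks with trapMask, while flagAt() tests one of the low bits. A worked sketch of that packing using the same 4+4 split:

    // Bit arithmetic for the [trapState:4 | flags:4] byte described above.
    class DataLayoutFlagsSketch {
        static final int TRAP_BITS  = 4;
        static final int TRAP_SHIFT = 8 - TRAP_BITS;          // 4
        static final int TRAP_MASK  = (1 << TRAP_BITS) - 1;   // 0x0F

        static int trapState(int flagsByte) { return (flagsByte >> TRAP_SHIFT) & TRAP_MASK; }
        static boolean flagAt(int flagsByte, int flagNumber) {
            return (flagsByte & (1 << flagNumber)) != 0;
        }

        public static void main(String[] args) {
            int flagsByte = (0x3 << TRAP_SHIFT) | 0x1;   // trap state 3, flag 0 set
            System.out.println(trapState(flagsByte));    // 3
            System.out.println(flagAt(flagsByte, 0));    // true
            System.out.println(flagAt(flagsByte, 1));    // false
        }
    }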
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/Field.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/Field.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,28 +39,20 @@
 
   /** Constructor for fields that are named in an InstanceKlass's
       fields array (i.e., named, non-VM fields) */
-  Field(InstanceKlass holder, int fieldArrayIndex) {
+  Field(InstanceKlass holder, int fieldIndex) {
     this.holder = holder;
-    this.fieldArrayIndex = fieldArrayIndex;
+    this.fieldIndex = fieldIndex;
+
+    offset               = holder.getFieldOffset(fieldIndex);
+    genericSignature     = holder.getFieldGenericSignature(fieldIndex);
 
-    ConstantPool cp      = holder.getConstants();
-    TypeArray fields     = holder.getFields();
-    short access         = fields.getShortAt(fieldArrayIndex + InstanceKlass.ACCESS_FLAGS_OFFSET);
-    short nameIndex      = fields.getShortAt(fieldArrayIndex + InstanceKlass.NAME_INDEX_OFFSET);
-    short signatureIndex = fields.getShortAt(fieldArrayIndex + InstanceKlass.SIGNATURE_INDEX_OFFSET);
-    offset               = VM.getVM().buildIntFromShorts(fields.getShortAt(fieldArrayIndex + InstanceKlass.LOW_OFFSET),
-                                                         fields.getShortAt(fieldArrayIndex + InstanceKlass.HIGH_OFFSET));
-    short genericSignatureIndex = fields.getShortAt(fieldArrayIndex + InstanceKlass.GENERIC_SIGNATURE_INDEX_OFFSET);
-    Symbol name = cp.getSymbolAt(nameIndex);
+    Symbol name          = holder.getFieldName(fieldIndex);
     id          = new NamedFieldIdentifier(name.asString());
-    signature   = cp.getSymbolAt(signatureIndex);
-    if (genericSignatureIndex != 0)  {
-       genericSignature = cp.getSymbolAt(genericSignatureIndex);
-    } else {
-       genericSignature = null;
-    }
 
+    signature            = holder.getFieldSignature(fieldIndex);
     fieldType   = new FieldType(signature);
+
+    short access         = holder.getFieldAccessFlags(fieldIndex);
     accessFlags = new AccessFlags(access);
   }
 
@@ -73,7 +65,7 @@
   private Symbol          signature;
   private Symbol          genericSignature;
   private AccessFlags     accessFlags;
-  private int             fieldArrayIndex;
+  private int             fieldIndex;
 
   /** Returns the byte offset of the field within the object or klass */
   public long getOffset() { return offset; }
@@ -101,8 +93,8 @@
   /** (Named, non-VM fields only) Returns the index in the fields
       TypeArray for this field. Equivalent to the "index" in the VM's
       fieldDescriptors. */
-  public int getFieldArrayIndex() {
-    return fieldArrayIndex;
+  public int getFieldIndex() {
+    return fieldIndex;
   }
 
   /** (Named, non-VM fields only) Retrieves the access flags. */
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/FieldType.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/FieldType.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -67,6 +67,8 @@
   public boolean isObject()  { return first == 'L'; }
   public boolean isArray()   { return first == '['; }
 
+  public Symbol getSignature() { return signature; }
+
   public static class ArrayInfo {
     private int dimension;
     private int elementBasicType; // See BasicType.java
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/GenerateOopMap.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/GenerateOopMap.java	Thu Dec 22 15:46:11 2011 +0000
@@ -569,10 +569,10 @@
       case Bytecodes._invokedynamic:
         // FIXME: print signature of referenced method (need more
         // accessors in ConstantPool and ConstantPoolCache)
-        int idx = currentBC.getIndexBig();
+        int idx = currentBC.hasIndexU4() ? currentBC.getIndexU4() : currentBC.getIndexU2();
         tty.print(" idx " + idx);
         /*
-          int idx = currentBC.getIndexBig();
+          int idx = currentBC.getIndexU2();
           ConstantPool cp       = method().getConstants();
           int nameAndTypeIdx    = cp.name_and_type_ref_index_at(idx);
           int signatureIdx      = cp.signature_ref_index_at(nameAndTypeIdx);
@@ -609,10 +609,10 @@
       case Bytecodes._invokedynamic:
         // FIXME: print signature of referenced method (need more
         // accessors in ConstantPool and ConstantPoolCache)
-        int idx = currentBC.getIndexBig();
+        int idx = currentBC.hasIndexU4() ? currentBC.getIndexU4() : currentBC.getIndexU2();
         tty.print(" idx " + idx);
         /*
-          int idx = currentBC.getIndexBig();
+          int idx = currentBC.getIndexU2();
           constantPoolOop cp    = method().constants();
           int nameAndTypeIdx    = cp.name_and_type_ref_index_at(idx);
           int signatureIdx      = cp.signature_ref_index_at(nameAndTypeIdx);
@@ -1118,7 +1118,8 @@
       current instruction, starting in the current state. */
   void  interp1                             (BytecodeStream itr) {
     if (DEBUG) {
-      System.err.println(" - bci " + itr.bci());
+      System.err.println(" - bci " + itr.bci() + " " + itr.code());
+      printCurrentState(System.err, itr, false);
     }
 
     //    if (TraceNewOopMapGeneration) {
@@ -1179,8 +1180,8 @@
 
     case Bytecodes._ldc2_w:            ppush(vvCTS);               break;
 
-    case Bytecodes._ldc:               doLdc(itr.getIndex(), itr.bci());    break;
-    case Bytecodes._ldc_w:             doLdc(itr.getIndexBig(), itr.bci());break;
+    case Bytecodes._ldc:               doLdc(itr.bci());           break;
+    case Bytecodes._ldc_w:             doLdc(itr.bci());           break;
 
     case Bytecodes._iload:
     case Bytecodes._fload:             ppload(vCTS, itr.getIndex()); break;
@@ -1372,18 +1373,16 @@
     case Bytecodes._jsr:               doJsr(itr.dest());          break;
     case Bytecodes._jsr_w:             doJsr(itr.dest_w());        break;
 
-    case Bytecodes._getstatic:         doField(true,  true,
-                                               itr.getIndexBig(),
-                                               itr.bci()); break;
-    case Bytecodes._putstatic:         doField(false, true,  itr.getIndexBig(), itr.bci()); break;
-    case Bytecodes._getfield:          doField(true,  false, itr.getIndexBig(), itr.bci()); break;
-    case Bytecodes._putfield:          doField(false, false, itr.getIndexBig(), itr.bci()); break;
+    case Bytecodes._getstatic:         doField(true,  true,  itr.getIndexU2Cpcache(), itr.bci()); break;
+    case Bytecodes._putstatic:         doField(false, true,  itr.getIndexU2Cpcache(), itr.bci()); break;
+    case Bytecodes._getfield:          doField(true,  false, itr.getIndexU2Cpcache(), itr.bci()); break;
+    case Bytecodes._putfield:          doField(false, false, itr.getIndexU2Cpcache(), itr.bci()); break;
 
     case Bytecodes._invokevirtual:
-    case Bytecodes._invokespecial:     doMethod(false, false, itr.getIndexBig(), itr.bci()); break;
-    case Bytecodes._invokestatic:      doMethod(true,  false, itr.getIndexBig(), itr.bci()); break;
-    case Bytecodes._invokedynamic:     doMethod(false, true,  itr.getIndexBig(), itr.bci()); break;
-    case Bytecodes._invokeinterface:   doMethod(false, true,  itr.getIndexBig(), itr.bci()); break;
+    case Bytecodes._invokespecial:     doMethod(false, false, itr.getIndexU2Cpcache(), itr.bci()); break;
+    case Bytecodes._invokestatic:      doMethod(true,  false, itr.getIndexU2Cpcache(), itr.bci()); break;
+    case Bytecodes._invokedynamic:     doMethod(true,  false, itr.getIndexU4(),        itr.bci()); break;
+    case Bytecodes._invokeinterface:   doMethod(false,  true, itr.getIndexU2Cpcache(), itr.bci()); break;
     case Bytecodes._newarray:
     case Bytecodes._anewarray:         ppNewRef(vCTS, itr.bci()); break;
     case Bytecodes._checkcast:         doCheckcast(); break;
@@ -1665,13 +1664,11 @@
     }
   }
 
-  void  doLdc                               (int idx, int bci) {
+  void  doLdc                               (int bci) {
+    BytecodeLoadConstant ldc = BytecodeLoadConstant.at(_method, bci);
     ConstantPool  cp  = method().getConstants();
-    ConstantTag   tag = cp.getTagAt(idx);
-    CellTypeState cts = (tag.isString() || tag.isUnresolvedString() ||
-                         tag.isKlass() || tag.isUnresolvedKlass())
-                          ? CellTypeState.makeLineRef(bci)
-                          : valCTS;
+    BasicType     bt = ldc.resultType();
+    CellTypeState cts = (bt == BasicType.T_OBJECT) ? CellTypeState.makeLineRef(bci) : valCTS;
     ppush1(cts);
   }
 
@@ -1729,15 +1726,7 @@
   void  doMethod                            (boolean is_static, boolean is_interface, int idx, int bci) {
     // Dig up signature for field in constant pool
     ConstantPool cp       = _method.getConstants();
-    int nameAndTypeIdx    = cp.getTagAt(idx).isNameAndType() ? idx : cp.getNameAndTypeRefIndexAt(idx);
-    int signatureIdx      = cp.getSignatureRefIndexAt(nameAndTypeIdx);
-    Symbol signature      = cp.getSymbolAt(signatureIdx);
-
-    if (DEBUG) {
-      System.err.println("doMethod: signature = " + signature.asString() + ", idx = " + idx +
-                         ", nameAndTypeIdx = " + nameAndTypeIdx + ", signatureIdx = " + signatureIdx +
-                         ", bci = " + bci);
-    }
+    Symbol signature      = cp.getSignatureRefAt(idx);
 
     // Parse method signature
     CellTypeStateList out = new CellTypeStateList(4);
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java	Thu Dec 22 15:46:11 2011 +0000
@@ -44,14 +44,14 @@
   }
 
   // field offset constants
-  public static int ACCESS_FLAGS_OFFSET;
-  public static int NAME_INDEX_OFFSET;
-  public static int SIGNATURE_INDEX_OFFSET;
-  public static int INITVAL_INDEX_OFFSET;
-  public static int LOW_OFFSET;
-  public static int HIGH_OFFSET;
-  public static int GENERIC_SIGNATURE_INDEX_OFFSET;
-  public static int NEXT_OFFSET;
+  private static int ACCESS_FLAGS_OFFSET;
+  private static int NAME_INDEX_OFFSET;
+  private static int SIGNATURE_INDEX_OFFSET;
+  private static int INITVAL_INDEX_OFFSET;
+  private static int LOW_OFFSET;
+  private static int HIGH_OFFSET;
+  private static int GENERIC_SIGNATURE_INDEX_OFFSET;
+  private static int FIELD_SLOTS;
   public static int IMPLEMENTORS_LIMIT;
 
   // ClassState constants
@@ -78,6 +78,7 @@
       implementors[i]    = new OopField(type.getOopField("_implementors[0]"), arrayOffset);
     }
     fields               = new OopField(type.getOopField("_fields"), Oop.getHeaderSize());
+    javaFieldsCount      = new CIntField(type.getCIntegerField("_java_fields_count"), Oop.getHeaderSize());
     constants            = new OopField(type.getOopField("_constants"), Oop.getHeaderSize());
     classLoader          = new OopField(type.getOopField("_class_loader"), Oop.getHeaderSize());
     protectionDomain     = new OopField(type.getOopField("_protection_domain"), Oop.getHeaderSize());
@@ -100,14 +101,14 @@
     headerSize           = alignObjectOffset(Oop.getHeaderSize() + type.getSize());
 
     // read field offset constants
-    ACCESS_FLAGS_OFFSET = db.lookupIntConstant("instanceKlass::access_flags_offset").intValue();
-    NAME_INDEX_OFFSET = db.lookupIntConstant("instanceKlass::name_index_offset").intValue();
-    SIGNATURE_INDEX_OFFSET = db.lookupIntConstant("instanceKlass::signature_index_offset").intValue();
-    INITVAL_INDEX_OFFSET = db.lookupIntConstant("instanceKlass::initval_index_offset").intValue();
-    LOW_OFFSET = db.lookupIntConstant("instanceKlass::low_offset").intValue();
-    HIGH_OFFSET = db.lookupIntConstant("instanceKlass::high_offset").intValue();
-    GENERIC_SIGNATURE_INDEX_OFFSET = db.lookupIntConstant("instanceKlass::generic_signature_offset").intValue();
-    NEXT_OFFSET = db.lookupIntConstant("instanceKlass::next_offset").intValue();
+    ACCESS_FLAGS_OFFSET            = db.lookupIntConstant("FieldInfo::access_flags_offset").intValue();
+    NAME_INDEX_OFFSET              = db.lookupIntConstant("FieldInfo::name_index_offset").intValue();
+    SIGNATURE_INDEX_OFFSET         = db.lookupIntConstant("FieldInfo::signature_index_offset").intValue();
+    INITVAL_INDEX_OFFSET           = db.lookupIntConstant("FieldInfo::initval_index_offset").intValue();
+    LOW_OFFSET                     = db.lookupIntConstant("FieldInfo::low_offset").intValue();
+    HIGH_OFFSET                    = db.lookupIntConstant("FieldInfo::high_offset").intValue();
+    GENERIC_SIGNATURE_INDEX_OFFSET = db.lookupIntConstant("FieldInfo::generic_signature_offset").intValue();
+    FIELD_SLOTS                    = db.lookupIntConstant("FieldInfo::field_slots").intValue();
     // read ClassState constants
     CLASS_STATE_UNPARSABLE_BY_GC = db.lookupIntConstant("instanceKlass::unparsable_by_gc").intValue();
     CLASS_STATE_ALLOCATED = db.lookupIntConstant("instanceKlass::allocated").intValue();
@@ -121,6 +122,13 @@
 
   InstanceKlass(OopHandle handle, ObjectHeap heap) {
     super(handle, heap);
+    if (getJavaFieldsCount() != getAllFieldsCount()) {
+      // Exercise the injected field logic
+      for (int i = getJavaFieldsCount(); i < getAllFieldsCount(); i++) {
+        getFieldName(i);
+        getFieldSignature(i);
+      }
+    }
   }
 
   private static OopField  arrayKlasses;
@@ -131,6 +139,7 @@
   private static CIntField nofImplementors;
   private static OopField[] implementors;
   private static OopField  fields;
+  private static CIntField javaFieldsCount;
   private static OopField  constants;
   private static OopField  classLoader;
   private static OopField  protectionDomain;
@@ -172,7 +181,7 @@
      private String value;
   }
 
-  private int  getInitStateAsInt() { return (int) initState.getValue(this); }
+  public int  getInitStateAsInt() { return (int) initState.getValue(this); }
   public ClassState getInitState() {
      int state = getInitStateAsInt();
      if (state == CLASS_STATE_UNPARSABLE_BY_GC) {
@@ -247,6 +256,61 @@
 
   public static long getHeaderSize() { return headerSize; }
 
+  public short getFieldAccessFlags(int index) {
+    return getFields().getShortAt(index * FIELD_SLOTS + ACCESS_FLAGS_OFFSET);
+  }
+
+  public short getFieldNameIndex(int index) {
+    if (index >= getJavaFieldsCount()) throw new IndexOutOfBoundsException("not a Java field;");
+    return getFields().getShortAt(index * FIELD_SLOTS + NAME_INDEX_OFFSET);
+  }
+
+  public Symbol getFieldName(int index) {
+    int nameIndex = getFields().getShortAt(index * FIELD_SLOTS + NAME_INDEX_OFFSET);
+    if (index < getJavaFieldsCount()) {
+      return getConstants().getSymbolAt(nameIndex);
+    } else {
+      return vmSymbols.symbolAt(nameIndex);
+    }
+  }
+
+  public short getFieldSignatureIndex(int index) {
+    if (index >= getJavaFieldsCount()) throw new IndexOutOfBoundsException("not a Java field;");
+    return getFields().getShortAt(index * FIELD_SLOTS + SIGNATURE_INDEX_OFFSET);
+  }
+
+  public Symbol getFieldSignature(int index) {
+    int signatureIndex = getFields().getShortAt(index * FIELD_SLOTS + SIGNATURE_INDEX_OFFSET);
+    if (index < getJavaFieldsCount()) {
+      return getConstants().getSymbolAt(signatureIndex);
+    } else {
+      return vmSymbols.symbolAt(signatureIndex);
+    }
+  }
+
+  public short getFieldGenericSignatureIndex(int index) {
+    return getFields().getShortAt(index * FIELD_SLOTS + GENERIC_SIGNATURE_INDEX_OFFSET);
+  }
+
+  public Symbol getFieldGenericSignature(int index) {
+    short genericSignatureIndex = getFieldGenericSignatureIndex(index);
+    if (genericSignatureIndex != 0)  {
+      return getConstants().getSymbolAt(genericSignatureIndex);
+    }
+    return null;
+  }
+
+  public short getFieldInitialValueIndex(int index) {
+    if (index >= getJavaFieldsCount()) throw new IndexOutOfBoundsException("not a Java field;");
+    return getFields().getShortAt(index * FIELD_SLOTS + INITVAL_INDEX_OFFSET);
+  }
+
+  public int getFieldOffset(int index) {
+    TypeArray fields = getFields();
+    return VM.getVM().buildIntFromShorts(fields.getShortAt(index * FIELD_SLOTS + LOW_OFFSET),
+                                         fields.getShortAt(index * FIELD_SLOTS + HIGH_OFFSET));
+  }
+
   // Accessors for declared fields
   public Klass     getArrayKlasses()        { return (Klass)        arrayKlasses.getValue(this); }
   public ObjArray  getMethods()             { return (ObjArray)     methods.getValue(this); }
@@ -257,6 +321,8 @@
   public Klass     getImplementor()         { return (Klass)        implementors[0].getValue(this); }
   public Klass     getImplementor(int i)    { return (Klass)        implementors[i].getValue(this); }
   public TypeArray getFields()              { return (TypeArray)    fields.getValue(this); }
+  public int       getJavaFieldsCount()     { return                (int) javaFieldsCount.getValue(this); }
+  public int       getAllFieldsCount()      { return                (int)getFields().getLength() / FIELD_SLOTS; }
   public ConstantPool getConstants()        { return (ConstantPool) constants.getValue(this); }
   public Oop       getClassLoader()         { return                classLoader.getValue(this); }
   public Oop       getProtectionDomain()    { return                protectionDomain.getValue(this); }
@@ -479,12 +545,10 @@
   }
 
   void iterateStaticFieldsInternal(OopVisitor visitor) {
-    TypeArray fields = getFields();
-    int length = (int) fields.getLength();
-    for (int index = 0; index < length; index += NEXT_OFFSET) {
-      short accessFlags    = fields.getShortAt(index + ACCESS_FLAGS_OFFSET);
-      short signatureIndex = fields.getShortAt(index + SIGNATURE_INDEX_OFFSET);
-      FieldType   type   = new FieldType(getConstants().getSymbolAt(signatureIndex));
+    int length = getJavaFieldsCount();
+    for (int index = 0; index < length; index++) {
+      short accessFlags    = getFieldAccessFlags(index);
+      FieldType   type   = new FieldType(getFieldSignature(index));
       AccessFlags access = new AccessFlags(accessFlags);
       if (access.isStatic()) {
         visitField(visitor, type, index);
@@ -496,18 +560,24 @@
     return getSuper();
   }
 
+  public static class StaticField {
+    public AccessFlags flags;
+    public Field field;
+
+    StaticField(Field field, AccessFlags flags) {
+      this.field = field;
+      this.flags = flags;
+    }
+  }
+
   public void iterateNonStaticFields(OopVisitor visitor, Oop obj) {
     if (getSuper() != null) {
       ((InstanceKlass) getSuper()).iterateNonStaticFields(visitor, obj);
     }
-    TypeArray fields = getFields();
-
-    int length = (int) fields.getLength();
-    for (int index = 0; index < length; index += NEXT_OFFSET) {
-      short accessFlags    = fields.getShortAt(index + ACCESS_FLAGS_OFFSET);
-      short signatureIndex = fields.getShortAt(index + SIGNATURE_INDEX_OFFSET);
-
-      FieldType   type   = new FieldType(getConstants().getSymbolAt(signatureIndex));
+    int length = getJavaFieldsCount();
+    for (int index = 0; index < length; index++) {
+      short accessFlags    = getFieldAccessFlags(index);
+      FieldType   type   = new FieldType(getFieldSignature(index));
       AccessFlags access = new AccessFlags(accessFlags);
       if (!access.isStatic()) {
         visitField(visitor, type, index);
@@ -517,14 +587,10 @@
 
   /** Field access by name. */
   public Field findLocalField(Symbol name, Symbol sig) {
-    TypeArray fields = getFields();
-    int n = (int) fields.getLength();
-    ConstantPool cp = getConstants();
-    for (int i = 0; i < n; i += NEXT_OFFSET) {
-      int nameIndex = fields.getShortAt(i + NAME_INDEX_OFFSET);
-      int sigIndex  = fields.getShortAt(i + SIGNATURE_INDEX_OFFSET);
-      Symbol f_name = cp.getSymbolAt(nameIndex);
-      Symbol f_sig  = cp.getSymbolAt(sigIndex);
+    int length = getJavaFieldsCount();
+    for (int i = 0; i < length; i++) {
+      Symbol f_name = getFieldName(i);
+      Symbol f_sig  = getFieldSignature(i);
       if (name.equals(f_name) && sig.equals(f_sig)) {
         return newField(i);
       }
@@ -599,8 +665,8 @@
 
   /** Get field by its index in the fields array. Only designed for
       use in a debugging system. */
-  public Field getFieldByIndex(int fieldArrayIndex) {
-    return newField(fieldArrayIndex);
+  public Field getFieldByIndex(int fieldIndex) {
+    return newField(fieldIndex);
   }
 
 
@@ -611,11 +677,9 @@
     public List getImmediateFields() {
         // A list of Fields for each field declared in this class/interface,
         // not including inherited fields.
-        TypeArray fields = getFields();
-
-        int length = (int) fields.getLength();
-        List immediateFields = new ArrayList(length / NEXT_OFFSET);
-        for (int index = 0; index < length; index += NEXT_OFFSET) {
+        int length = getJavaFieldsCount();
+        List immediateFields = new ArrayList(length);
+        for (int index = 0; index < length; index++) {
             immediateFields.add(getFieldByIndex(index));
         }
 
@@ -802,9 +866,7 @@
 
   // Creates new field from index in fields TypeArray
   private Field newField(int index) {
-    TypeArray fields = getFields();
-    short signatureIndex = fields.getShortAt(index + SIGNATURE_INDEX_OFFSET);
-    FieldType type = new FieldType(getConstants().getSymbolAt(signatureIndex));
+    FieldType type = new FieldType(getFieldSignature(index));
     if (type.isOop()) {
      if (VM.getVM().isCompressedOopsEnabled()) {
         return new NarrowOopField(this, index);
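
For illustration, the per-index accessors these InstanceKlass hunks introduce
(getJavaFieldsCount, getFieldAccessFlags, getFieldName, getFieldSignature) can be
combined by a caller roughly as follows. This is a sketch only, not part of the
changeset: the FieldLister class name is invented for the example, and it assumes
an attached SA session and that the accessors are visible from the calling code.

    import sun.jvm.hotspot.oops.AccessFlags;
    import sun.jvm.hotspot.oops.InstanceKlass;

    public class FieldLister {
      // Prints each field declared directly in 'klass' (inherited fields excluded),
      // mirroring the iteration pattern used in iterateStaticFieldsInternal above.
      public static void list(InstanceKlass klass) {
        int count = klass.getJavaFieldsCount();
        for (int i = 0; i < count; i++) {
          AccessFlags flags = new AccessFlags(klass.getFieldAccessFlags(i));
          System.out.println((flags.isStatic() ? "static   " : "instance ")
                             + klass.getFieldSignature(i).asString() + " "
                             + klass.getFieldName(i).asString());
        }
      }
    }
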
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/JumpData.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * SUN PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ */
+
+package sun.jvm.hotspot.oops;
+
+import java.io.*;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.utilities.*;
+
+// JumpData
+//
+// A JumpData is used to access profiling information for a direct
+// branch.  It is a counter, used for counting the number of branches,
+// plus a data displacement, used for realigning the data pointer to
+// the corresponding target bci.
+public class JumpData extends ProfileData {
+  static final int   takenOffSet = 0;
+  static final int     displacementOffSet = 1;
+  static final int     jumpCellCount = 2;
+
+  public JumpData(DataLayout layout) {
+    super(layout);
+    //assert(layout.tag() == DataLayout.jumpDataTag ||
+    //       layout.tag() == DataLayout.branchDataTag, "wrong type");
+  }
+
+  static int staticCellCount() {
+    return jumpCellCount;
+  }
+
+  public int cellCount() {
+    return staticCellCount();
+  }
+
+  // Direct accessor
+  int taken() {
+    return uintAt(takenOffSet);
+  }
+
+  int displacement() {
+    return intAt(displacementOffSet);
+  }
+
+  // Code generation support
+  static int takenOffset() {
+    return cellOffset(takenOffSet);
+  }
+
+  static int displacementOffset() {
+    return cellOffset(displacementOffSet);
+  }
+
+  public void printDataOn(PrintStream st) {
+    printShared(st, "JumpData");
+    st.println("taken(" + taken() + ") displacement(" + displacement() + ")");
+  }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java	Thu Dec 22 15:46:11 2011 +0000
@@ -49,6 +49,7 @@
     Type type                  = db.lookupType("methodOopDesc");
     constMethod                = new OopField(type.getOopField("_constMethod"), 0);
     constants                  = new OopField(type.getOopField("_constants"), 0);
+    methodData                 = new OopField(type.getOopField("_method_data"), 0);
     methodSize                 = new CIntField(type.getCIntegerField("_method_size"), 0);
     maxStack                   = new CIntField(type.getCIntegerField("_max_stack"), 0);
     maxLocals                  = new CIntField(type.getCIntegerField("_max_locals"), 0);
@@ -58,9 +59,13 @@
     vtableIndex                = new CIntField(type.getCIntegerField("_vtable_index"), 0);
     if (!VM.getVM().isCore()) {
       invocationCounter        = new CIntField(type.getCIntegerField("_invocation_counter"), 0);
+      backedgeCounter          = new CIntField(type.getCIntegerField("_backedge_counter"), 0);
     }
     bytecodeOffset = type.getSize();
 
+    interpreterThrowoutCountField = new CIntField(type.getCIntegerField("_interpreter_throwout_count"), 0);
+    interpreterInvocationCountField = new CIntField(type.getCIntegerField("_interpreter_invocation_count"), 0);
+
     /*
     interpreterEntry           = type.getAddressField("_interpreter_entry");
     fromCompiledCodeEntryPoint = type.getAddressField("_from_compiled_code_entry_point");
@@ -79,6 +84,7 @@
   // Fields
   private static OopField  constMethod;
   private static OopField  constants;
+  private static OopField  methodData;
   private static CIntField methodSize;
   private static CIntField maxStack;
   private static CIntField maxLocals;
@@ -86,10 +92,14 @@
   private static CIntField accessFlags;
   private static CIntField vtableIndex;
   private static CIntField invocationCounter;
+  private static CIntField backedgeCounter;
   private static long      bytecodeOffset;
 
   private static AddressField       code;
 
+  private static CIntField interpreterThrowoutCountField;
+  private static CIntField interpreterInvocationCountField;
+
   // constant method names - <init>, <clinit>
   // Initialized lazily to avoid initialization ordering dependencies between Method and SymbolTable
   private static Symbol objectInitializerName;
@@ -116,6 +126,7 @@
   // Accessors for declared fields
   public ConstMethod  getConstMethod()                { return (ConstMethod)  constMethod.getValue(this);       }
   public ConstantPool getConstants()                  { return (ConstantPool) constants.getValue(this);         }
+  public MethodData   getMethodData()                 { return (MethodData) methodData.getValue(this);          }
   public TypeArray    getExceptionTable()             { return getConstMethod().getExceptionTable();            }
   /** WARNING: this is in words, not useful in this system; use getObjectSize() instead */
   public long         getMethodSize()                 { return                methodSize.getValue(this);        }
@@ -134,6 +145,12 @@
     }
     return invocationCounter.getValue(this);
   }
+  public long         getBackedgeCounter()          {
+    if (Assert.ASSERTS_ENABLED) {
+      Assert.that(!VM.getVM().isCore(), "must not be used in core build");
+    }
+    return backedgeCounter.getValue(this);
+  }
 
   // get associated compiled native method, if available, else return null.
   public NMethod getNativeMethod() {
@@ -180,12 +197,24 @@
     return getConstMethod().getBytecodeShortArg(bci);
   }
 
+  /** Fetches a 16-bit native ordered value from the
+      bytecode stream */
+  public short getNativeShortArg(int bci) {
+    return getConstMethod().getNativeShortArg(bci);
+  }
+
   /** Fetches a 32-bit big-endian ("Java ordered") value from the
       bytecode stream */
   public int getBytecodeIntArg(int bci) {
     return getConstMethod().getBytecodeIntArg(bci);
   }
 
+  /** Fetches a 32-bit native ordered value from the
+      bytecode stream */
+  public int getNativeIntArg(int bci) {
+    return getConstMethod().getNativeIntArg(bci);
+  }
+
   public byte[] getByteCode() {
     return getConstMethod().getByteCode();
   }
@@ -321,4 +350,11 @@
     buf.append(")");
     return buf.toString().replace('/', '.');
   }
+  public int interpreterThrowoutCount() {
+    return (int) interpreterThrowoutCountField.getValue(getHandle());
+  }
+
+  public int interpreterInvocationCount() {
+    return (int) interpreterInvocationCountField.getValue(getHandle());
+  }
 }
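
The new Method fields above expose the interpreter's profiling counters and the
method's MethodData oop directly. A minimal caller-side sketch, not part of the
changeset: the MethodCounters helper is invented for the example, and 'm' is
assumed to come from an attached, non-core SA session (the counter accessors
assert !isCore()).

    import java.io.PrintStream;
    import sun.jvm.hotspot.oops.Method;
    import sun.jvm.hotspot.oops.MethodData;

    public class MethodCounters {
      public static void print(Method m, PrintStream out) {
        out.println("invocations:             " + m.getInvocationCounter());
        out.println("backedges:               " + m.getBackedgeCounter());
        out.println("interpreter invocations: " + m.interpreterInvocationCount());
        out.println("interpreter throwouts:   " + m.interpreterThrowoutCount());
        MethodData md = m.getMethodData();   // null when no profile has been allocated
        if (md != null) {
          md.printDataOn(out);               // dumps the per-bci ProfileData entries
        }
      }
    }
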
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/MethodData.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/MethodData.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,92 @@
 // A MethodData provides interpreter profiling information
 
 public class MethodData extends Oop {
+  static int TypeProfileWidth = 2;
+  static int BciProfileWidth = 2;
+  static int CompileThreshold;
+
+  static int Reason_many;                 // indicates presence of several reasons
+  static int Reason_none;                 // indicates absence of a relevant deopt.
+  static int Reason_LIMIT;
+  static int Reason_RECORDED_LIMIT;       // some are not recorded per bc
+
+  private static String[] trapReasonName;
+
+  static String trapReasonName(int reason) {
+    if (reason == Reason_many)  return "many";
+    if (reason < Reason_LIMIT)
+      return trapReasonName[reason];
+    return "reason" + reason;
+  }
+
+
+  static int trapStateReason(int trapState) {
+    // This assert provides the link between the width of DataLayout.trapBits
+    // and the encoding of "recorded" reasons.  It ensures there are enough
+    // bits to store all needed reasons in the per-BCI MDO profile.
+    // assert(dsReasonMask >= reasonRecordedLimit, "enough bits");
+    int recompileBit = (trapState & dsRecompileBit);
+    trapState -= recompileBit;
+    if (trapState == dsReasonMask) {
+      return Reason_many;
+    } else {
+      // assert((int)reasonNone == 0, "state=0 => Reason_none");
+      return trapState;
+    }
+  }
+
+
+  static final int dsReasonMask   = DataLayout.trapMask >> 1;
+  static final int dsRecompileBit = DataLayout.trapMask - dsReasonMask;
+
+  static boolean trapStateIsRecompiled(int trapState) {
+    return (trapState & dsRecompileBit) != 0;
+  }
+
+  static boolean reasonIsRecordedPerBytecode(int reason) {
+    return reason > Reason_none && reason < Reason_RECORDED_LIMIT;
+  }
+  static int trapStateAddReason(int trapState, int reason) {
+    // assert(reasonIsRecordedPerBytecode((DeoptReason)reason) || reason == reasonMany, "valid reason");
+    int recompileBit = (trapState & dsRecompileBit);
+    trapState -= recompileBit;
+    if (trapState == dsReasonMask) {
+      return trapState + recompileBit;     // already at state lattice bottom
+    } else if (trapState == reason) {
+      return trapState + recompileBit;     // the condition is already true
+    } else if (trapState == 0) {
+      return reason + recompileBit;          // no condition has yet been true
+    } else {
+      return dsReasonMask + recompileBit;  // fall to state lattice bottom
+    }
+  }
+  static int trapStateSetRecompiled(int trapState, boolean z) {
+    if (z)  return trapState |  dsRecompileBit;
+    else    return trapState & ~dsRecompileBit;
+  }
+
+  static String formatTrapState(int trapState) {
+    int reason      = trapStateReason(trapState);
+    boolean     recompFlag = trapStateIsRecompiled(trapState);
+    // Re-encode the state from its decoded components.
+    int decodedState = 0;
+    if (reasonIsRecordedPerBytecode(reason) || reason == Reason_many)
+      decodedState = trapStateAddReason(decodedState, reason);
+    if (recompFlag)
+      decodedState = trapStateSetRecompiled(decodedState, recompFlag);
+    // If the state re-encodes properly, format it symbolically.
+    // Because this routine is used for debugging and diagnostics,
+    // be robust even if the state is a strange value.
+    if (decodedState != trapState) {
+      // Random buggy state that doesn't decode??
+      return "#" + trapState;
+    } else {
+      return trapReasonName(reason) + (recompFlag ? " recompiled" : "");
+    }
+  }
+
+
+
   static {
     VM.registerVMInitializedObserver(new Observer() {
         public void update(Observable o, Object data) {
@@ -48,7 +134,58 @@
 
     size           = new CIntField(type.getCIntegerField("_size"), 0);
     method         = new OopField(type.getOopField("_method"), 0);
-    // FIXME: add more fields and accessors
+
+    VM.Flag[] flags = VM.getVM().getCommandLineFlags();
+    for (int f = 0; f < flags.length; f++) {
+      VM.Flag flag = flags[f];
+      if (flag.getName().equals("TypeProfileWidth")) {
+        TypeProfileWidth = (int)flag.getIntx();
+      } else if (flag.getName().equals("BciProfileWidth")) {
+        BciProfileWidth = (int)flag.getIntx();
+      } else if (flag.getName().equals("CompileThreshold")) {
+        CompileThreshold = (int)flag.getIntx();
+      }
+    }
+
+    cellSize = (int)VM.getVM().getAddressSize();
+
+    dataSize     = new CIntField(type.getCIntegerField("_data_size"), 0);
+    data         = type.getAddressField("_data[0]");
+
+    sizeofMethodDataOopDesc = (int)type.getSize();
+
+    Reason_many            = db.lookupIntConstant("Deoptimization::Reason_many").intValue();
+    Reason_none            = db.lookupIntConstant("Deoptimization::Reason_none").intValue();
+    Reason_LIMIT           = db.lookupIntConstant("Deoptimization::Reason_LIMIT").intValue();
+    Reason_RECORDED_LIMIT  = db.lookupIntConstant("Deoptimization::Reason_RECORDED_LIMIT").intValue();
+
+    trapReasonName = new String[Reason_LIMIT];
+
+    // Find Deopt reasons
+    Iterator i = db.getIntConstants();
+    String prefix = "Deoptimization::Reason_";
+    while (i.hasNext()) {
+      String name = (String)i.next();
+      if (name.startsWith(prefix)) {
+        // Strip prefix
+        if (!name.endsWith("Reason_many") &&
+            !name.endsWith("Reason_LIMIT") &&
+            !name.endsWith("Reason_RECORDED_LIMIT")) {
+          String trimmed = name.substring(prefix.length());
+          int value = db.lookupIntConstant(name).intValue();
+          if (trapReasonName[value] != null) {
+            throw new InternalError("duplicate reasons: " + trapReasonName[value] + " " + trimmed);
+          }
+          trapReasonName[value] = trimmed;
+        }
+      }
+    }
+    for (int index = 0; index < trapReasonName.length; index++) {
+      if (trapReasonName[index] == null) {
+        throw new InternalError("missing reason for " + index);
+      }
+      System.out.println(trapReasonName[index]);
+    }
   }
 
   MethodData(OopHandle handle, ObjectHeap heap) {
@@ -60,6 +197,11 @@
   private static long baseOffset;
   private static CIntField size;
   private static OopField  method;
+  private static CIntField dataSize;
+  private static AddressField data;
+
+  public static int sizeofMethodDataOopDesc;
+  public static int cellSize;
 
   public long getObjectSize() {
     return alignObjectSize(size.getValue(this));
@@ -81,4 +223,119 @@
       visitor.doCInt(size, true);
     }
   }
+
+  int dataSize() {
+    if (dataSize == null) {
+      return 0;
+    } else {
+      return (int)dataSize.getValue(this);
+    }
+  }
+
+  boolean outOfBounds(int dataIndex) {
+    return dataIndex >= dataSize();
+  }
+
+  ProfileData dataAt(int dataIndex) {
+    if (outOfBounds(dataIndex)) {
+      return null;
+    }
+    DataLayout dataLayout = new DataLayout(this, dataIndex + (int)data.getOffset());
+
+    switch (dataLayout.tag()) {
+    case DataLayout.noTag:
+    default:
+      throw new InternalError(dataIndex + " " + dataSize() + " " + dataLayout.tag());
+    case DataLayout.bitDataTag:
+      return new BitData(dataLayout);
+    case DataLayout.counterDataTag:
+      return new CounterData(dataLayout);
+    case DataLayout.jumpDataTag:
+      return new JumpData(dataLayout);
+    case DataLayout.receiverTypeDataTag:
+      return new ReceiverTypeData(dataLayout);
+    case DataLayout.virtualCallDataTag:
+      return new VirtualCallData(dataLayout);
+    case DataLayout.retDataTag:
+      return new RetData(dataLayout);
+    case DataLayout.branchDataTag:
+      return new BranchData(dataLayout);
+    case DataLayout.multiBranchDataTag:
+      return new MultiBranchData(dataLayout);
+    }
+  }
+
+  int dpToDi(int dp) {
+    // this is an offset from the base of the MDO, so convert it to an offset into _data
+    return dp - (int)data.getOffset();
+  }
+
+  int firstDi() { return 0; }
+  public ProfileData firstData() { return dataAt(firstDi()); }
+  public ProfileData nextData(ProfileData current) {
+    int currentIndex = dpToDi(current.dp());
+    int nextIndex = currentIndex + current.sizeInBytes();
+    return dataAt(nextIndex);
+  }
+  boolean isValid(ProfileData current) { return current != null; }
+
+  public void printDataOn(PrintStream st) {
+    ProfileData data = firstData();
+    for ( ; isValid(data); data = nextData(data)) {
+      st.print(dpToDi(data.dp()));
+      st.print(" ");
+      // st->fillTo(6);
+      data.printDataOn(st);
+    }
+  }
+
+  private byte[] fetchDataAt(Address base, long offset, long size) {
+    byte[] result = new byte[(int)size];
+    for (int i = 0; i < size; i++) {
+      result[i] = base.getJByteAt(offset + i);
+    }
+    return result;
+  }
+
+  public byte[] orig() {
+    // fetch the fixed methodDataOopDesc portion (from the start of the oop up to the start of _data)
+    return fetchDataAt(this.getHandle(), 0, sizeofMethodDataOopDesc);
+  }
+
+  public long[] data() {
+    // Read the data as an array of intptr_t elements
+    OopHandle base = getHandle();
+    long offset = data.getOffset();
+    int elements = dataSize() / cellSize;
+    long[] result = new long[elements];
+    for (int i = 0; i < elements; i++) {
+      Address value = base.getAddressAt(offset + i * MethodData.cellSize);
+      if (value != null) {
+        result[i] = value.minus(null);
+      }
+    }
+    return result;
+  }
+
+  // Get a measure of how much mileage the method has on it.
+  int mileageOf(Method method) {
+    long mileage = 0;
+    int iic = method.interpreterInvocationCount();
+    if (mileage < iic)  mileage = iic;
+
+    long ic = method.getInvocationCounter();
+    long bc = method.getBackedgeCounter();
+
+    long icval = ic >> 3;
+    if ((ic & 4) != 0) icval += CompileThreshold;
+    if (mileage < icval)  mileage = icval;
+    long bcval = bc >> 3;
+    if ((bc & 4) != 0) bcval += CompileThreshold;
+    if (mileage < bcval)  mileage = bcval;
+    return (int)mileage;
+  }
+
+  public int currentMileage() {
+    return 20000;
+  }
 }
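
firstData/nextData above define the iteration protocol over the raw _data cells:
each ProfileData reports its own sizeInBytes(), nextData advances by that amount,
and dataAt returns null once the index passes dataSize(). A sketch of a caller-side
walk, not part of the changeset; the ProfileWalker name is made up, and 'md' is
assumed to come from Method.getMethodData() in an attached session.

    import java.io.PrintStream;
    import sun.jvm.hotspot.oops.MethodData;
    import sun.jvm.hotspot.oops.ProfileData;

    public class ProfileWalker {
      // Mirrors what MethodData.printDataOn does internally.
      public static void walk(MethodData md, PrintStream out) {
        for (ProfileData d = md.firstData(); d != null; d = md.nextData(d)) {
          d.printDataOn(out);   // prints bci, tag-specific counters, and any trap state
        }
      }
    }
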
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/MultiBranchData.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * SUN PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ */
+
+package sun.jvm.hotspot.oops;
+
+import java.io.*;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.utilities.*;
+
+// MultiBranchData
+//
+// A MultiBranchData is used to access profiling information for
+// a multi-way branch (*switch bytecodes).  It consists of a series
+// of (count, displacement) pairs, which count the number of times each
+// case was taken and specify the data displacement for each branch target.
+public class MultiBranchData extends ArrayData {
+  static final int   defaultCountOffSet = 0;
+  static final int     defaultDisaplacementOffSet = 1;
+  static final int     caseArrayStart = 2;
+  static final int   relativeCountOffSet = 0;
+  static final int     relativeDisplacementOffSet = 1;
+  static final int     perCaseCellCount = 2;
+
+  public MultiBranchData(DataLayout layout) {
+    super(layout);
+    //assert(layout.tag() == DataLayout.multiBranchDataTag, "wrong type");
+  }
+
+  // static int computeCellCount(BytecodeStream stream);
+
+  int numberOfCases() {
+    int alen = arrayLen() - 2; // get rid of default case here.
+    //assert(alen % perCaseCellCount == 0, "must be even");
+    return (alen / perCaseCellCount);
+  }
+
+  int defaultCount() {
+    return arrayUintAt(defaultCountOffSet);
+  }
+  int defaultDisplacement() {
+    return arrayIntAt(defaultDisaplacementOffSet);
+  }
+
+  int countAt(int index) {
+    return arrayUintAt(caseArrayStart +
+                         index * perCaseCellCount +
+                         relativeCountOffSet);
+  }
+  int displacementAt(int index) {
+    return arrayIntAt(caseArrayStart +
+                        index * perCaseCellCount +
+                        relativeDisplacementOffSet);
+  }
+
+  // Code generation support
+  static int defaultCountOffset() {
+    return arrayElementOffset(defaultCountOffSet);
+  }
+  static int defaultDisplacementOffset() {
+    return arrayElementOffset(defaultDisaplacementOffSet);
+  }
+  static int caseCountOffset(int index) {
+    return caseArrayOffset() +
+      (perCaseSize() * index) +
+      relativeCountOffset();
+  }
+  static int caseArrayOffset() {
+    return arrayElementOffset(caseArrayStart);
+  }
+  static int perCaseSize() {
+    return (perCaseCellCount) * MethodData.cellSize;
+  }
+  static int relativeCountOffset() {
+    return (relativeCountOffSet) * MethodData.cellSize;
+  }
+  static int relativeDisplacementOffset() {
+    return (relativeDisplacementOffSet) * MethodData.cellSize;
+  }
+
+  public void printDataOn(PrintStream st) {
+    printShared(st, "MultiBranchData");
+    st.println("default_count(" + defaultCount() + ") displacement(" + defaultDisplacement() + ")");
+    int cases = numberOfCases();
+    for (int i = 0; i < cases; i++) {
+      tab(st);
+      st.println("count(" + countAt(i) + ") displacement(" + displacementAt(i) + ")");
+    }
+  }
+}
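
The switch profile is laid out as a two-cell default entry followed by one
(count, displacement) cell pair per case, which is all the arithmetic that
countAt/displacementAt perform. A self-contained illustration of that layout,
not part of the changeset (the SwitchLayoutDemo class is made up; plain
arithmetic, no VM required):

    public class SwitchLayoutDemo {
      static final int caseArrayStart   = 2;  // cells taken by the default (count, displacement) pair
      static final int perCaseCellCount = 2;  // one count cell and one displacement cell per case

      static int countCell(int caseIndex)        { return caseArrayStart + caseIndex * perCaseCellCount; }
      static int displacementCell(int caseIndex) { return countCell(caseIndex) + 1; }

      public static void main(String[] args) {
        for (int i = 0; i < 3; i++) {
          System.out.println("case " + i + ": count in cell " + countCell(i)
                             + ", displacement in cell " + displacementCell(i));
        }
      }
    }
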
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java	Thu Dec 22 15:46:11 2011 +0000
@@ -33,6 +33,7 @@
 
 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.gc_interface.*;
+import sun.jvm.hotspot.gc_implementation.g1.*;
 import sun.jvm.hotspot.gc_implementation.parallelScavenge.*;
 import sun.jvm.hotspot.memory.*;
 import sun.jvm.hotspot.runtime.*;
@@ -514,9 +515,16 @@
 
   private void addPermGenLiveRegions(List output, CollectedHeap heap) {
     LiveRegionsCollector lrc = new LiveRegionsCollector(output);
-    if (heap instanceof GenCollectedHeap) {
-       GenCollectedHeap genHeap = (GenCollectedHeap) heap;
-       Generation gen = genHeap.permGen();
+    if (heap instanceof SharedHeap) {
+       if (Assert.ASSERTS_ENABLED) {
+          Assert.that(heap instanceof GenCollectedHeap ||
+                      heap instanceof G1CollectedHeap,
+                      "Expecting GenCollectedHeap or G1CollectedHeap, " +
+                      "but got " + heap.getClass().getName());
+       }
+       // Handles both GenCollectedHeap and G1CollectedHeap
+       SharedHeap sharedHeap = (SharedHeap) heap;
+       Generation gen = sharedHeap.permGen();
        gen.spaceIterate(lrc, true);
     } else if (heap instanceof ParallelScavengeHeap) {
        ParallelScavengeHeap psh = (ParallelScavengeHeap) heap;
@@ -524,8 +532,9 @@
        addLiveRegions(permGen.objectSpace().getLiveRegions(), output);
     } else {
        if (Assert.ASSERTS_ENABLED) {
-          Assert.that(false, "Expecting GenCollectedHeap or ParallelScavengeHeap, but got " +
-                             heap.getClass().getName());
+          Assert.that(false,
+                      "Expecting SharedHeap or ParallelScavengeHeap, " +
+                      "but got " + heap.getClass().getName());
        }
     }
   }
@@ -588,10 +597,14 @@
        addLiveRegions(youngGen.fromSpace().getLiveRegions(), liveRegions);
        PSOldGen oldGen = psh.oldGen();
        addLiveRegions(oldGen.objectSpace().getLiveRegions(), liveRegions);
+    } else if (heap instanceof G1CollectedHeap) {
+        G1CollectedHeap g1h = (G1CollectedHeap) heap;
+        g1h.heapRegionIterate(lrc);
     } else {
        if (Assert.ASSERTS_ENABLED) {
-          Assert.that(false, "Expecting GenCollectedHeap or ParallelScavengeHeap, but got " +
-                              heap.getClass().getName());
+          Assert.that(false, "Expecting GenCollectedHeap, G1CollectedHeap, " +
+                      "or ParallelScavengeHeap, but got " +
+                      heap.getClass().getName());
        }
     }
 
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/OopUtilities.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/OopUtilities.java	Thu Dec 22 15:46:11 2011 +0000
@@ -112,6 +112,32 @@
     return buf.toString();
   }
 
+  public static String escapeString(String s) {
+    StringBuilder sb = null;
+    for (int index = 0; index < s.length(); index++) {
+      char value = s.charAt(index);
+      if (value >= 32 && value < 127 || value == '\'' || value == '\\') {
+        if (sb != null) {
+          sb.append(value);
+        }
+      } else {
+        if (sb == null) {
+          sb = new StringBuilder(s.length() * 2);
+          sb.append(s, 0, index);
+        }
+        sb.append("\\u");
+        if (value < 0x10) sb.append("000");
+        else if (value < 0x100) sb.append("00");
+        else if (value < 0x1000) sb.append("0");
+        sb.append(Integer.toHexString(value));
+      }
+    }
+    if (sb != null) {
+      return sb.toString();
+    }
+    return s;
+  }
+
   public static String stringOopToString(Oop stringOop) {
     if (offsetField == null) {
       InstanceKlass k = (InstanceKlass) stringOop.getKlass();
@@ -129,6 +155,10 @@
                              countField.getValue(stringOop));
   }
 
+  public static String stringOopToEscapedString(Oop stringOop) {
+    return escapeString(stringOopToString(stringOop));
+  }
+
   private static void initThreadGroupFields() {
     if (threadGroupParentField == null) {
       SystemDictionary sysDict = VM.getVM().getSystemDictionary();
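
escapeString above leaves printable ASCII (including quote and backslash) untouched
and rewrites every other character as a four-digit Unicode escape, allocating a
StringBuilder only once an escape is actually needed. A stand-alone usage sketch,
not part of the changeset; the EscapeDemo class is made up, and it only requires
the SA classes on the classpath:

    import sun.jvm.hotspot.oops.OopUtilities;

    public class EscapeDemo {
      public static void main(String[] args) {
        // Printable ASCII passes through unchanged (and without allocation).
        System.out.println(OopUtilities.escapeString("plain text"));
        // Control characters and non-ASCII are rewritten as Unicode escapes.
        System.out.println(OopUtilities.escapeString("tab\tomega\u03a9"));
      }
    }
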
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ProfileData.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.oops;
+
+import java.io.*;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.utilities.*;
+
+public abstract class ProfileData {
+  // This is a pointer to a section of profiling data.
+  private DataLayout _data;
+
+  public DataLayout data() { return _data; }
+
+  // How many cells are in this?
+  public abstract int cellCount();
+
+
+  // Return the size of this data.
+  public int sizeInBytes() {
+    return DataLayout.computeSizeInBytes(cellCount());
+  }
+
+  public int dp() {
+    return data().dp();
+  }
+
+  // Low-level accessors for underlying data
+  int intptrAt(int index) {
+    //assert(0 <= index && index < cellCount(), "oob");
+    return data().cellAt(index);
+  }
+  int intAt(int index) {
+    return (int)intptrAt(index);
+  }
+  int uintAt(int index) {
+    return (int)intptrAt(index);
+  }
+  Oop oopAt(int index) {
+    return data().oopAt(index);
+  }
+
+  public Address addressAt(int index) {
+    return data().addressAt(index);
+  }
+
+  boolean flagAt(int flagNumber) {
+    return data().flagAt(flagNumber);
+  }
+
+  // two convenient imports for use by subclasses:
+  public static int cellOffset(int index) {
+    return DataLayout.cellOffset(index);
+  }
+
+  public ProfileData(DataLayout data) {
+    _data = data;
+  }
+
+  // Constructor for invalid ProfileData.
+  ProfileData() {
+    _data = null;
+  }
+
+  int bci() {
+    return data().bci();
+  }
+
+  int trapState() {
+    return data().trapState();
+  }
+  public abstract void printDataOn(PrintStream st);
+
+  void tab(PrintStream st) {
+    st.print("\t");
+  }
+
+  void printShared(PrintStream st, String name) {
+    st.print("bci: " + bci());
+    // st.fillTo(tabWidthOne);
+    st.print(" " +  name + " ");
+    tab(st);
+    int trap = trapState();
+    if (trap != 0) {
+      st.print("trap(" + MethodData.formatTrapState(trap) + ") ");
+    }
+    int flags = data().flags();
+    if (flags != 0)
+      st.print("flags(" + flags + ") ");
+  }
+
+  public String toString() {
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintStream ps = new PrintStream(baos);
+    try {
+      printDataOn(ps);
+    } finally {
+      ps.close();
+    }
+    return baos.toString();
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ReceiverTypeData.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * SUN PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ */
+
+package sun.jvm.hotspot.oops;
+
+import java.io.*;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.utilities.*;
+
+// ReceiverTypeData
+//
+// A ReceiverTypeData is used to access profiling information about a
+// dynamic type check.  It consists of a counter which counts the total times
+// that the check is reached, and a series of (Klass, count) pairs
+// which are used to store a type profile for the receiver of the check.
+public class ReceiverTypeData extends CounterData {
+  static final int   receiver0Offset = counterCellCount;
+  static final int     count0Offset = receiver0Offset + 1;
+  static final int     receiverTypeRowCellCount = (count0Offset + 1) - receiver0Offset;
+
+  public ReceiverTypeData(DataLayout layout) {
+    super(layout);
+    //assert(layout.tag() == DataLayout.receiverTypeDataTag ||
+    //       layout.tag() == DataLayout.virtualCallDataTag, "wrong type");
+  }
+
+  boolean isReceivertypedata() { return true; }
+
+  static int staticCellCount() {
+    return counterCellCount + MethodData.TypeProfileWidth * receiverTypeRowCellCount;
+  }
+
+  public int cellCount() {
+    return staticCellCount();
+  }
+
+  // Direct accessors
+  public static int rowLimit() {
+    return MethodData.TypeProfileWidth;
+  }
+  public static int receiverCellIndex(int row) {
+    return receiver0Offset + row * receiverTypeRowCellCount;
+  }
+  public static int receiverCountCellIndex(int row) {
+    return count0Offset + row * receiverTypeRowCellCount;
+  }
+
+  // Get the receiver at row.  The 'unchecked' version is needed by parallel old
+  // gc; it does not assert the receiver is a klass.  During compaction of the
+  // perm gen, the klass may already have moved, so the isKlass() predicate
+  // would fail.  The 'normal' version should be used whenever possible.
+  Klass receiverUnchecked(int row) {
+    //assert(row < rowLimit(), "oob");
+    Oop recv = oopAt(receiverCellIndex(row));
+    return (Klass)recv;
+  }
+
+  public Klass receiver(int row) {
+    Klass recv = receiverUnchecked(row);
+    //assert(recv == NULL || ((oop)recv).isKlass(), "wrong type");
+    return recv;
+  }
+
+  public int receiverCount(int row) {
+    //assert(row < rowLimit(), "oob");
+    return uintAt(receiverCountCellIndex(row));
+  }
+
+  // Code generation support
+  static int receiverOffset(int row) {
+    return cellOffset(receiverCellIndex(row));
+  }
+  static int receiverCountOffset(int row) {
+    return cellOffset(receiverCountCellIndex(row));
+  }
+  static int receiverTypeDataSize() {
+    return cellOffset(staticCellCount());
+  }
+
+  void printReceiverDataOn(PrintStream st) {
+    int row;
+    int entries = 0;
+    for (row = 0; row < rowLimit(); row++) {
+      if (receiver(row) != null)  entries++;
+    }
+    st.println("count(" + count() + ") entries(" + entries + ")");
+    for (row = 0; row < rowLimit(); row++) {
+      if (receiver(row) != null) {
+        tab(st);
+        receiver(row).printValueOn(st);
+        st.println("(" + receiverCount(row) + ")");
+      }
+    }
+  }
+  public void printDataOn(PrintStream st) {
+    printShared(st, "ReceiverTypeData");
+    printReceiverDataOn(st);
+  }
+}
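
Because the row accessors of ReceiverTypeData are public, a tool walking a
MethodData (as in the earlier sketch) can read the receiver type profile of a
call site directly. A hedged sketch, not part of the changeset; the
ReceiverProfile class is invented, and 'data' is assumed to be a ReceiverTypeData
or VirtualCallData entry returned from that walk.

    import sun.jvm.hotspot.oops.Klass;
    import sun.jvm.hotspot.oops.ReceiverTypeData;

    public class ReceiverProfile {
      public static void print(ReceiverTypeData data) {
        for (int row = 0; row < ReceiverTypeData.rowLimit(); row++) {
          Klass k = data.receiver(row);           // null for unused rows
          if (k != null) {
            System.out.println(k.getName().asString() + " seen "
                               + data.receiverCount(row) + " times");
          }
        }
      }
    }
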
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/RetData.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.oops;
+
+import java.io.*;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.utilities.*;
+
+// RetData
+//
+// A RetData is used to access profiling information for a ret bytecode.
+// It is composed of a count of the number of times that the ret has
+// been executed, followed by a series of triples of the form
+// (bci, count, di) which count the number of times that some bci was the
+// target of the ret and cache a corresponding data displacement.
+public class RetData extends CounterData {
+
+  static final int   bci0Offset = counterCellCount;
+  static final int     count0Offset = bci0Offset + 1;
+  static final int     displacement0Offset = count0Offset + 1;
+  static final int     retRowCellCount = (displacement0Offset + 1) - bci0Offset;
+
+  public RetData(DataLayout layout) {
+    super(layout);
+    //assert(layout.tag() == DataLayout.retDataTag, "wrong type");
+  }
+
+  static final int noBci = -1; // value of bci when bci1/2 are not in use.
+
+  static int staticCellCount() {
+    return counterCellCount + MethodData.BciProfileWidth * retRowCellCount;
+  }
+
+  public int cellCount() {
+    return staticCellCount();
+  }
+
+  static int rowLimit() {
+    return MethodData.BciProfileWidth;
+  }
+  static int bciCellIndex(int row) {
+    return bci0Offset + row * retRowCellCount;
+  }
+  static int bciCountCellIndex(int row) {
+    return count0Offset + row * retRowCellCount;
+  }
+  static int bciDisplacementCellIndex(int row) {
+    return displacement0Offset + row * retRowCellCount;
+  }
+
+  // Direct accessors
+  int bci(int row) {
+    return intAt(bciCellIndex(row));
+  }
+  int bciCount(int row) {
+    return uintAt(bciCountCellIndex(row));
+  }
+  int bciDisplacement(int row) {
+    return intAt(bciDisplacementCellIndex(row));
+  }
+
+  // Code generation support
+  static int bciOffset(int row) {
+    return cellOffset(bciCellIndex(row));
+  }
+  static int bciCountOffset(int row) {
+    return cellOffset(bciCountCellIndex(row));
+  }
+  static int bciDisplacementOffset(int row) {
+    return cellOffset(bciDisplacementCellIndex(row));
+  }
+
+  public void printDataOn(PrintStream st) {
+    printShared(st, "RetData");
+    int row;
+    int entries = 0;
+    for (row = 0; row < rowLimit(); row++) {
+      if (bci(row) != noBci)  entries++;
+    }
+    st.println("count(" + count() + ") entries(" + entries + ")");
+    for (row = 0; row < rowLimit(); row++) {
+      if (bci(row) != noBci) {
+        tab(st);
+        st.println(" bci(" + bci(row) + ": count(" + bciCount(row) + ") displacement(" + bciDisplacement(row) + "))");
+      }
+    }
+  }
+}
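
RetData's per-row accessors (bci, bciCount, bciDisplacement) are package-private,
so a direct reader would either live in sun.jvm.hotspot.oops or fall back on the
public printDataOn. A sketch under the former assumption, not part of the
changeset; the RetProfilePrinter class and its placement are made up for the example.

    package sun.jvm.hotspot.oops;   // assumed placement, to reach the package-private accessors

    import java.io.PrintStream;

    public class RetProfilePrinter {
      public static void print(RetData data, PrintStream out) {
        for (int row = 0; row < RetData.rowLimit(); row++) {
          if (data.bci(row) != RetData.noBci) {   // noBci marks an unused row
            out.println("ret to bci " + data.bci(row)
                        + " taken " + data.bciCount(row) + " times");
          }
        }
      }
    }
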
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/TypeArray.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/TypeArray.java	Thu Dec 22 15:46:11 2011 +0000
@@ -53,6 +53,9 @@
   public boolean isTypeArray()         { return true; }
 
   public byte getByteAt(long index) {
+    if (index < 0 || index >= getLength()) {
+      throw new ArrayIndexOutOfBoundsException(index + " " + getLength());
+    }
     long offset = baseOffsetInBytes(BasicType.T_BYTE) + index * getHeap().getByteSize();
     return getHandle().getJByteAt(offset);
   }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/VirtualCallData.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * SUN PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ */
+
+package sun.jvm.hotspot.oops;
+
+import java.io.*;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.utilities.*;
+
+// VirtualCallData
+//
+// A VirtualCallData is used to access profiling information about a
+// call.  For now, it has nothing more than a ReceiverTypeData.
+public class VirtualCallData extends ReceiverTypeData {
+  public VirtualCallData(DataLayout layout) {
+    super(layout);
+    //assert(layout.tag() == DataLayout.virtualCallDataTag, "wrong type");
+  }
+
+  static int staticCellCount() {
+    // At this point we could add more profile state, e.g., for arguments.
+    // But for now it's the same size as the base record type.
+    return ReceiverTypeData.staticCellCount();
+  }
+
+  public int cellCount() {
+    return staticCellCount();
+  }
+
+  // Direct accessors
+  static int virtualCallDataSize() {
+    return cellOffset(staticCellCount());
+  }
+
+  public void printDataOn(PrintStream st) {
+    printShared(st, "VirtualCallData");
+    printReceiverDataOn(st);
+  }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/java_lang_Class.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/java_lang_Class.java	Thu Dec 22 15:46:11 2011 +0000
@@ -55,13 +55,13 @@
     // klass and oop_size are HotSpot magic fields and hence we can't
     // find them from InstanceKlass for java.lang.Class.
     Type jlc = db.lookupType("java_lang_Class");
-    int klassOffset = (int) jlc.getCIntegerField("klass_offset").getValue();
+    int klassOffset = (int) jlc.getCIntegerField("_klass_offset").getValue();
     if (VM.getVM().isCompressedOopsEnabled()) {
       klassField = new NarrowOopField(new NamedFieldIdentifier("klass"), klassOffset, true);
     } else {
       klassField = new OopField(new NamedFieldIdentifier("klass"), klassOffset, true);
     }
-    int oopSizeOffset = (int) jlc.getCIntegerField("oop_size_offset").getValue();
+    int oopSizeOffset = (int) jlc.getCIntegerField("_oop_size_offset").getValue();
     oopSizeField = new IntField(new NamedFieldIdentifier("oop_size"), oopSizeOffset, true);
   }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/Block.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.util.*;
+import java.io.PrintStream;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class Block extends VMObject {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("Block");
+    nodesField = type.getAddressField("_nodes");
+    succsField = type.getAddressField("_succs");
+    numSuccsField = new CIntField(type.getCIntegerField("_num_succs"), 0);
+    preOrderField = new CIntField(type.getCIntegerField("_pre_order"), 0);
+    domDepthField = new CIntField(type.getCIntegerField("_dom_depth"), 0);
+    idomField = type.getAddressField("_idom");
+    freqField = type.getJFloatField("_freq");
+  }
+
+  private static AddressField nodesField;
+  private static AddressField succsField;
+  private static CIntField numSuccsField;
+  private static CIntField preOrderField;
+  private static CIntField domDepthField;
+  private static AddressField idomField;
+  private static JFloatField freqField;
+
+  public Block(Address addr) {
+    super(addr);
+  }
+
+  public int preOrder() {
+    return (int)preOrderField.getValue(getAddress());
+  }
+
+  public float freq() {
+    return (float)freqField.getValue(getAddress());
+  }
+
+  public Node_List nodes() {
+    return new Node_List(getAddress().addOffsetTo(nodesField.getOffset()));
+  }
+
+  public void dump(PrintStream out) {
+    out.print("B" + preOrder());
+    out.print(" Freq: " + freq());
+    out.println();
+    Node_List nl = nodes();
+    int cnt = nl.size();
+    for( int i=0; i<cnt; i++ )
+      nl.at(i).dump(out);
+    out.print("\n");
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/Block_Array.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class Block_Array extends VMObject {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("Block_Array");
+    sizeField = new CIntField(type.getCIntegerField("_size"), 0);
+    blocksField = type.getAddressField("_blocks");
+    arenaField = type.getAddressField("_arena");
+  }
+
+  private static CIntField sizeField;
+  private static AddressField blocksField;
+  private static AddressField arenaField;
+
+  public Block_Array(Address addr) {
+    super(addr);
+  }
+
+  public int Max() {
+    return (int) sizeField.getValue(getAddress());
+  }
+
+  public Block at(int i) {
+    return new Block(blocksField.getValue(getAddress()).getAddressAt(i * (int)VM.getVM().getAddressSize()));
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/Block_List.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class Block_List extends Block_Array {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("Block_List");
+    cntField = new CIntField(type.getCIntegerField("_cnt"), 0);
+  }
+
+  private static CIntField cntField;
+
+  public Block_List(Address addr) {
+    super(addr);
+  }
+
+  public int size() {
+    return (int) cntField.getValue(getAddress());
+  }
+}
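
Block_Array.at() resolves element i by reading the i-th pointer out of _blocks, and
Block_List adds the element count, so once a tool holds a Block_List it can dump
every basic block of a compiled method. A rough sketch, not part of the changeset;
the BlockDumper class is made up, and how the Block_List itself is obtained
(e.g. from the compiler's CFG) is outside these hunks and is assumed here.

    import java.io.PrintStream;
    import sun.jvm.hotspot.opto.Block;
    import sun.jvm.hotspot.opto.Block_List;

    public class BlockDumper {
      public static void dumpAll(Block_List blocks, PrintStream out) {
        for (int i = 0; i < blocks.size(); i++) {
          Block b = blocks.at(i);   // at() is inherited from Block_Array
          b.dump(out);              // prints "B<pre_order> Freq: ..." and the block's nodes
        }
      }
    }
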
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/CallDynamicJavaNode.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class CallDynamicJavaNode extends CallJavaNode {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("CallDynamicJavaNode");
+  }
+
+
+  public CallDynamicJavaNode(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/CallJavaNode.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.util.*;
+import java.io.*;
+import sun.jvm.hotspot.code.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.ci.*;
+import sun.jvm.hotspot.types.*;
+
+public class CallJavaNode extends CallNode {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("CallJavaNode");
+    methodField = type.getAddressField("_method");
+  }
+
+  private static AddressField methodField;
+
+  public CallJavaNode(Address addr) {
+    super(addr);
+  }
+
+  public ciMethod method() {
+    return (ciMethod) ciObjectFactory.get(methodField.getValue(getAddress()));
+  }
+
+  public void dumpSpec(PrintStream out) {
+    if (method() !=  null) {
+      out.print(" " + method().method().externalNameAndSignature());
+    }
+    super.dumpSpec(out);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/CallNode.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.io.PrintStream;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class CallNode extends SafePointNode {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("CallNode");
+    entryPointField = type.getAddressField("_entry_point");
+  }
+
+  private static AddressField entryPointField;
+
+  public Address entryPoint() {
+    return entryPointField.getValue(getAddress());
+  }
+
+  public CallNode(Address addr) {
+    super(addr);
+  }
+
+  public void dumpSpec(PrintStream out) {
+    out.print(" ");
+    // tf()->dumpOn(st);
+    // if (_cnt != countUnknown)  st->print(" C=%f",_cnt);
+    JVMState jvms = jvms();
+    if (jvms != null)  jvms.dumpSpec(out);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/CallRuntimeNode.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.io.PrintStream;
+import java.util.*;
+import sun.jvm.hotspot.utilities.CStringUtilities;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class CallRuntimeNode extends CallNode {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("CallRuntimeNode");
+    nameField    = type.getAddressField("_name");
+  }
+
+  static private AddressField nameField;
+
+  public String name() {
+    return CStringUtilities.getString(nameField.getValue(getAddress()));
+  }
+
+  public CallRuntimeNode(Address addr) {
+    super(addr);
+  }
+
+  public void dumpSpec(PrintStream out) {
+    out.print(" #");
+    out.print(name());
+    super.dumpSpec(out);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/CallStaticJavaNode.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.io.PrintStream;
+import java.util.*;
+import sun.jvm.hotspot.utilities.CStringUtilities;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class CallStaticJavaNode extends CallJavaNode {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("CallStaticJavaNode");
+    nameField    = type.getAddressField("_name");
+  }
+
+  private static AddressField nameField;
+
+  public String name() {
+    return CStringUtilities.getString(nameField.getValue(getAddress()));
+  }
+
+  public CallStaticJavaNode(Address addr) {
+    super(addr);
+  }
+
+  public void dumpSpec(PrintStream out) {
+    out.print(" Static ");
+    String name = name();
+    if (name != null) {
+      out.print(name);
+      // int trapReq = uncommonTrapRequest();
+      // if (trapReq != 0) {
+      //   char buf[100];
+      //   st->print("(%s)",
+      //             Deoptimization::formatTrapRequest(buf, sizeof(buf),
+      //                                                 trapReq));
+      // }
+      out.print(" ");
+    }
+    super.dumpSpec(out);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/Compile.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.util.*;
+import sun.jvm.hotspot.ci.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class Compile extends VMObject {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("Compile");
+    rootField = type.getAddressField("_root");
+    uniqueField = new CIntField(type.getCIntegerField("_unique"), 0);
+    entryBciField = new CIntField(type.getCIntegerField("_entry_bci"), 0);
+    topField = type.getAddressField("_top");
+    cfgField = type.getAddressField("_cfg");
+    regallocField = type.getAddressField("_regalloc");
+    methodField = type.getAddressField("_method");
+    iltField = type.getAddressField("_ilt");
+  }
+
+  private static AddressField rootField;
+  private static CIntField uniqueField;
+  private static CIntField entryBciField;
+  private static AddressField topField;
+  private static AddressField cfgField;
+  private static AddressField regallocField;
+  private static AddressField methodField;
+  private static AddressField iltField;
+
+  public Compile(Address addr) {
+    super(addr);
+  }
+
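+  // _root is wrapped directly as a RootNode (its concrete type is fixed)
+  // instead of going through the generic Node.create() factory.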
+  public Node root() {
+    return new RootNode(rootField.getValue(this.getAddress()));
+  }
+
+  public int entryBci() {
+    return (int)entryBciField.getValue(getAddress());
+  }
+
+  public ciMethod method() {
+    return (ciMethod) ciObjectFactory.get(methodField.getValue(getAddress()));
+  }
+
+  public PhaseCFG cfg() {
+    Address a = cfgField.getValue(this.getAddress());
+    if (a != null) {
+      return new PhaseCFG(a);
+    }
+    return null;
+  }
+
+  public InlineTree ilt() {
+    Address a = iltField.getValue(this.getAddress());
+    if (a != null) {
+      return new InlineTree(a);
+    }
+    return null;
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/HaltNode.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class HaltNode extends Node {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("HaltNode");
+  }
+
+
+  public HaltNode(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/InlineTree.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.util.*;
+import java.io.PrintStream;
+import sun.jvm.hotspot.ci.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.utilities.GrowableArray;
+import sun.jvm.hotspot.types.*;
+
+public class InlineTree extends VMObject {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("InlineTree");
+    callerJvmsField = type.getAddressField("_caller_jvms");
+    methodField = type.getAddressField("_method");
+    callerTreeField = type.getAddressField("_caller_tree");
+    subtreesField = type.getAddressField("_subtrees");
+  }
+
+  private static AddressField callerJvmsField;
+  private static AddressField methodField;
+  private static AddressField callerTreeField;
+  private static AddressField subtreesField;
+
+  private static StaticBaseConstructor<InlineTree> inlineTreeConstructor = new StaticBaseConstructor<InlineTree>(InlineTree.class);
+
+  public InlineTree(Address addr) {
+    super(addr);
+  }
+
+  public InlineTree callerTree() {
+    Address addr = callerTreeField.getValue(getAddress());
+    if (addr == null) return null;
+
+    return new InlineTree(addr);
+  }
+
+  public ciMethod method() {
+    return (ciMethod) ciObjectFactory.get(methodField.getValue(getAddress()));
+  }
+
+  public JVMState callerJvms() {
+    return JVMState.create(callerJvmsField.getValue(getAddress()));
+  }
+
+  public int callerBci() {
+    JVMState jvms = callerJvms();
+    return (jvms != null) ? jvms.bci() : -1;
+  }
+
+  public GrowableArray<InlineTree> subtrees() {
+    Address addr = getAddress().addOffsetTo(subtreesField.getOffset());
+
+    return GrowableArray.create(addr, inlineTreeConstructor);
+  }
+
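+  // Recursively print this inline tree: each entry shows its caller bci and
+  // method name, with subtrees indented two extra spaces per level.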
+  public void printImpl(PrintStream st, int indent) {
+    for (int i = 0; i < indent; i++) st.print(" ");
+    st.printf(" @ %d ", callerBci());
+    method().printShortName(st);
+    st.println();
+
+    GrowableArray<InlineTree> subt = subtrees();
+    for (int i = 0 ; i < subt.length(); i++) {
+      subt.at(i).printImpl(st, indent + 2);
+    }
+  }
+  public void print(PrintStream st) {
+    printImpl(st, 2);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/JVMState.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.io.PrintStream;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.ci.*;
+import sun.jvm.hotspot.types.*;
+
+public class JVMState extends VMObject {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("JVMState");
+    mapField = type.getAddressField("_map");
+    methodField = type.getAddressField("_method");
+    bciField = new CIntField(type.getCIntegerField("_bci"), 0);
+    spField = new CIntField(type.getCIntegerField("_sp"), 0);
+    endoffField = new CIntField(type.getCIntegerField("_endoff"), 0);
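+    // _scloff (presumably the scalar-replaced-object section offset) is not
+    // defined in every target VM's type database, so tolerate its absence.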
+    try {
+        scloffField = new CIntField(type.getCIntegerField("_scloff"), 0);
+    } catch (Exception e) {
+    }
+    monoffField = new CIntField(type.getCIntegerField("_monoff"), 0);
+    stkoffField = new CIntField(type.getCIntegerField("_stkoff"), 0);
+    locoffField = new CIntField(type.getCIntegerField("_locoff"), 0);
+    depthField = new CIntField(type.getCIntegerField("_depth"), 0);
+    callerField = type.getAddressField("_caller");
+  }
+
+  private static AddressField mapField;
+  private static AddressField methodField;
+  private static CIntField bciField;
+  private static CIntField spField;
+  private static CIntField endoffField;
+  private static CIntField scloffField;
+  private static CIntField monoffField;
+  private static CIntField stkoffField;
+  private static CIntField locoffField;
+  private static CIntField depthField;
+  private static AddressField callerField;
+
+  public static JVMState create(Address addr) {
+    if (addr == null) return null;
+    return new JVMState(addr);
+  }
+
+  public JVMState(Address addr) {
+    super(addr);
+  }
+
+  public ciMethod method() {
+    return (ciMethod) ciObjectFactory.get(methodField.getValue(getAddress()));
+  }
+
+  public int bci() {
+    return (int)bciField.getValue(getAddress());
+  }
+
+  public JVMState caller() {
+    return create(callerField.getValue(getAddress()));
+  }
+
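+  // Print "declaringClass::method @ bci:n" for this frame, then recurse into
+  // the caller states; frames without a method are reported as runtime stubs.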
+  public void dumpSpec(PrintStream out) {
+    ciMethod m = method();
+    if (m != null) {
+      Method meth = m.method();
+      out.print(" " + meth.getMethodHolder().getName().asString().replace('/', '.') + "::" +
+                meth.getName().asString() + " @ bci:" + bci());
+    } else {
+      out.print(" runtime stub");
+    }
+    if (caller() != null)  caller().dumpSpec(out);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/LoopNode.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class LoopNode extends RegionNode {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("LoopNode");
+  }
+
+
+  public LoopNode(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/MachCallJavaNode.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.util.*;
+import java.io.PrintStream;
+import sun.jvm.hotspot.ci.ciMethod;
+import sun.jvm.hotspot.ci.ciObjectFactory;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class MachCallJavaNode extends MachCallNode {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("MachCallJavaNode");
+    methodField = type.getAddressField("_method");
+    bciField = new CIntField(type.getCIntegerField("_bci"), 0);
+  }
+
+  private static AddressField methodField;
+  private static CIntField bciField;
+
+  public ciMethod method() {
+    return (ciMethod) ciObjectFactory.get(methodField.getValue(getAddress()));
+  }
+
+  public MachCallJavaNode(Address addr) {
+    super(addr);
+  }
+
+  public void dumpSpec(PrintStream st) {
+    ciMethod m = method();
+    if (m != null) {
+      m.printShortName(st);
+      st.print(" ");
+    }
+    super.dumpSpec(st);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/MachCallNode.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.util.*;
+import java.io.PrintStream;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class MachCallNode extends MachSafePointNode {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("MachCallNode");
+  }
+
+  public MachCallNode(Address addr) {
+    super(addr);
+  }
+
+  public void dumpSpec(PrintStream st) {
+      st.print("# ");
+      // tf()->dump_on(st);
+      // if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
+      if (jvms() != null)  jvms().dumpSpec(st);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/MachCallRuntimeNode.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.util.*;
+import java.io.PrintStream;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.utilities.CStringUtilities;
+
+public class MachCallRuntimeNode extends MachCallJavaNode {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("MachCallRuntimeNode");
+    nameField    = type.getAddressField("_name");
+  }
+
+  private static AddressField nameField;
+
+  public String name() {
+    return CStringUtilities.getString(nameField.getValue(getAddress()));
+  }
+
+  public MachCallRuntimeNode(Address addr) {
+    super(addr);
+  }
+
+  public void dumpSpec(PrintStream out) {
+    out.printf("%s ", name());
+    super.dumpSpec(out);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/MachCallStaticJavaNode.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.util.*;
+import java.io.PrintStream;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.utilities.CStringUtilities;
+
+public class MachCallStaticJavaNode extends MachCallJavaNode {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("MachCallStaticJavaNode");
+    nameField    = type.getAddressField("_name");
+  }
+
+  private static AddressField nameField;
+
+  public String name() {
+    return CStringUtilities.getString(nameField.getValue(getAddress()));
+  }
+
+  public MachCallStaticJavaNode(Address addr) {
+    super(addr);
+  }
+
+  public void dumpSpec(PrintStream st) {
+    st.print("Static ");
+    String n = name();
+    if (n != null) {
+      st.printf("wrapper for: %s", n);
+      // dump_trap_args(st);
+      st.print(" ");
+    }
+    super.dumpSpec(st);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/MachIfNode.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.util.*;
+import java.io.PrintStream;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class MachIfNode extends MachNode {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("MachIfNode");
+    probField = type.getJFloatField("_prob");
+    fcntField = type.getJFloatField("_fcnt");
+  }
+
+  private static JFloatField probField;
+  private static JFloatField fcntField;
+
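+  // _prob and _fcnt presumably hold C2's branch probability and profiled
+  // frequency for this test; dumpSpec echoes them as "P=" and "C=".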
+  float prob() {
+    return probField.getValue(getAddress());
+  }
+
+  float cnt() {
+    return fcntField.getValue(getAddress());
+  }
+
+  public MachIfNode(Address addr) {
+    super(addr);
+  }
+
+  public void dumpSpec(PrintStream out) {
+    out.print("P=" + prob() + ", C=" + cnt());
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/MachNode.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class MachNode extends Node {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("MachNode");
+  }
+
+  public MachNode(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/MachReturnNode.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class MachReturnNode extends MachNode {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("MachReturnNode");
+  }
+
+  public MachReturnNode(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/MachSafePointNode.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.util.*;
+import java.io.PrintStream;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class MachSafePointNode extends MachReturnNode {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("MachSafePointNode");
+    jvmsField = type.getAddressField("_jvms");
+    jvmadjField = new CIntField(type.getCIntegerField("_jvmadj"), 0);
+  }
+
+  private static AddressField jvmsField;
+  private static CIntField jvmadjField;
+
+  public MachSafePointNode(Address addr) {
+    super(addr);
+  }
+
+  public JVMState jvms() {
+    return JVMState.create(jvmsField.getValue(getAddress()));
+  }
+
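+  // Walk the JVMState caller chain so the dump shows the full inlining stack,
+  // callee first; errors while reading the chain are printed, not propagated.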
+  public void dumpSpec(PrintStream out) {
+    try {
+      JVMState jvms = jvms();
+      if (jvms != null) out.print(" !");
+      if (jvms == null) out.print("empty jvms");
+      while (jvms != null) {
+        Method m = jvms.method().method();
+        int bci = jvms.bci();
+        out.print(" " + m.getMethodHolder().getName().asString().replace('/', '.') + "::" + m.getName().asString() + " @ bci:" + bci);
+        jvms = jvms.caller();
+      }
+    } catch (Exception e) {
+      out.print(e);
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/MultiNode.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class MultiNode extends Node {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("MultiNode");
+  }
+
+
+  public MultiNode(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/Node.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.io.*;
+import java.lang.reflect.Constructor;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class Node extends VMObject {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("Node");
+    outmaxField = new CIntField(type.getCIntegerField("_outmax"), 0);
+    outcntField = new CIntField(type.getCIntegerField("_outcnt"), 0);
+    maxField = new CIntField(type.getCIntegerField("_max"), 0);
+    cntField = new CIntField(type.getCIntegerField("_cnt"), 0);
+    idxField = new CIntField(type.getCIntegerField("_idx"), 0);
+    outField = type.getAddressField("_out");
+    inField = type.getAddressField("_in");
+
+    nodeType = db.lookupType("Node");
+
+    virtualConstructor = new VirtualBaseConstructor(db, nodeType, "sun.jvm.hotspot.opto", Node.class);
+  }
+
+  private static CIntField outmaxField;
+  private static CIntField outcntField;
+  private static CIntField maxField;
+  private static CIntField cntField;
+  private static CIntField idxField;
+  private static AddressField outField;
+  private static AddressField inField;
+
+  private static VirtualBaseConstructor virtualConstructor;
+
+  private static Type nodeType;
+
+  static HashMap<Address, Node> nodes = new HashMap<Address, Node>();
+
+  static HashMap constructors = new HashMap();
+
+  static abstract class Instantiator {
+    abstract Node create(Address addr);
+  }
+
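+  // Factory for Node wrappers: resolve the dynamic C++ type of the address to
+  // the matching sun.jvm.hotspot.opto class and cache one wrapper per address.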
+  public static Node create(Address addr) {
+    if (addr == null) return null;
+    Node result = (Node)nodes.get(addr);
+    if (result == null) {
+      result = (Node)virtualConstructor.instantiateWrapperFor(addr);
+      nodes.put(addr, result);
+    }
+    return result;
+  }
+
+  public Node(Address addr) {
+    super(addr);
+  }
+
+  public int outcnt() {
+    return (int)outcntField.getValue(this.getAddress());
+  }
+
+  public int req() {
+    return (int)cntField.getValue(this.getAddress());
+  }
+
+  public int len() {
+    return (int)maxField.getValue(this.getAddress());
+  }
+
+  public int idx() {
+    return (int)idxField.getValue(this.getAddress());
+  }
+
+  private Node[] _out;
+  private Node[] _in;
+
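+  // Input and output edge arrays are materialized lazily from the target
+  // process and cached; the node is assumed not to change while inspected.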
+  public Node rawOut(int i) {
+    if (_out == null) {
+      int addressSize = (int)VM.getVM().getAddressSize();
+      _out = new Node[outcnt()];
+      Address ptr = outField.getValue(this.getAddress());
+      for (int j = 0; j < outcnt(); j++) {
+        _out[j] = Node.create(ptr.getAddressAt(j * addressSize));
+      }
+    }
+    return _out[i];
+  }
+
+  public Node in(int i) {
+    if (_in == null) {
+      int addressSize = (int)VM.getVM().getAddressSize();
+      _in = new Node[len()];
+      Address ptr = inField.getValue(this.getAddress());
+      for (int j = 0; j < len(); j++) {
+        _in[j] = Node.create(ptr.getAddressAt(j * addressSize));
+      }
+    }
+    return _in[i];
+  }
+
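+  // Breadth-first walk to |d| levels: d > 0 follows input edges, d < 0 follows
+  // output edges. Returns the visited nodes in discovery order; the onlyCtrl
+  // filter is not applied yet (see the commented-out checks below).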
+  public ArrayList collect(int d, boolean onlyCtrl) {
+    int depth = Math.abs(d);
+    ArrayList nstack = new ArrayList();
+    BitSet set = new BitSet();
+
+    nstack.add(this);
+    set.set(idx());
+    int begin = 0;
+    int end = 0;
+    for (int i = 0; i < depth; i++) {
+      end = nstack.size();
+      for(int j = begin; j < end; j++) {
+        Node tp  = (Node)nstack.get(j);
+        int limit = d > 0 ? tp.len() : tp.outcnt();
+        for(int k = 0; k < limit; k++) {
+          Node n = d > 0 ? tp.in(k) : tp.rawOut(k);
+
+          // if (NotANode(n))  continue;
+          if (n == null) continue;
+          // do not recurse through top or the root (would reach unrelated stuff)
+          // if (n.isRoot() || n.isTop())  continue;
+          // if (onlyCtrl && !n.isCfg()) continue;
+
+          if (!set.get(n.idx())) {
+            nstack.add(n);
+            set.set(n.idx());
+          }
+        }
+      }
+      begin = end;
+    }
+    return nstack;
+  }
+
+  protected void dumpNodes(Node s, int d, boolean onlyCtrl, PrintStream out) {
+    if (s == null) return;
+
+    ArrayList nstack = s.collect(d, onlyCtrl);
+    int end = nstack.size();
+    if (d > 0) {
+      for(int j = end-1; j >= 0; j--) {
+        ((Node)nstack.get(j)).dump(out);
+      }
+    } else {
+      for(int j = 0; j < end; j++) {
+        ((Node)nstack.get(j)).dump(out);
+      }
+    }
+  }
+
+  public void dump(int depth, PrintStream out) {
+    dumpNodes(this, depth, false, out);
+  }
+
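+  // Node name as shown in dumps: prefer the dynamic C++ type from the type
+  // database, fall back to this wrapper's class name, and strip a trailing
+  // "Node" suffix either way.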
+  public String Name() {
+    Type t = VM.getVM().getTypeDataBase().findDynamicTypeForAddress(getAddress(), nodeType);
+    String name = null;
+    if (t != null) {
+        name = t.toString();
+    } else {
+        Class c = getClass();
+        if (c == Node.class) {
+            // couldn't identify class type
+            return "UnknownNode<" + getAddress().getAddressAt(0) + ">";
+        }
+        name = getClass().getName();
+        if (name.startsWith("sun.jvm.hotspot.opto.")) {
+            name = name.substring("sun.jvm.hotspot.opto.".length());
+        }
+    }
+    if (name.endsWith("Node")) {
+        return name.substring(0, name.length() - 4);
+    }
+    return name;
+  }
+
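+  // One-line dump: index, name, required inputs ("_" for a null edge), any
+  // precedence edges after "|", the output edges in "[[ ]]", then dumpSpec.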
+  public void dump(PrintStream out) {
+    out.print(" ");
+    out.print(idx());
+    out.print("\t");
+    out.print(Name());
+    out.print("\t=== ");
+    int i = 0;
+    for (i = 0; i < req(); i++) {
+      Node n = in(i);
+      if (n != null) {
+        out.print(' ');
+        out.print(in(i).idx());
+      } else {
+        out.print("_");
+      }
+      out.print(" ");
+    }
+    if (len() != req()) {
+      int prec = 0;
+      for (; i < len(); i++) {
+        Node n = in(i);
+        if (n != null) {
+          if (prec++ == 0) {
+            out.print("| ");
+          }
+          out.print(in(i).idx());
+        }
+        out.print(" ");
+      }
+    }
+    dumpOut(out);
+    dumpSpec(out);
+    out.println();
+  }
+
+  void dumpOut(PrintStream out) {
+    // Delimit the output edges
+    out.print(" [[");
+    // Dump the output edges
+    for (int i = 0; i < outcnt(); i++) {    // For all outputs
+      Node u = rawOut(i);
+      if (u == null) {
+        out.print("_ ");
+      // } else if (NotANode(u)) {
+      //   out.print("NotANode ");
+      } else {
+        // out.print("%c%d ", Compile::current()->nodeArena()->contains(u) ? ' ' : 'o', u->_idx);
+        out.print(' ');
+        out.print(u.idx());
+        out.print(' ');
+      }
+    }
+    out.print("]] ");
+  }
+
+  public void dumpSpec(PrintStream out) {
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/Node_Array.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class Node_Array extends VMObject {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("Node_Array");
+    maxField = new CIntField(type.getCIntegerField("_max"), 0);
+    nodesField = type.getAddressField("_nodes");
+    aField = type.getAddressField("_a");
+  }
+
+  private static CIntField maxField;
+  private static AddressField nodesField;
+  private static AddressField aField;
+
+  public Node_Array(Address addr) {
+    super(addr);
+  }
+
+  public int Size() {
+    return (int) maxField.getValue(getAddress());
+  }
+
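+  // Read the i-th Node pointer out of the _nodes array (stride is the target
+  // VM's pointer size) and wrap it via the Node factory.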
+  public Node at(int i) {
+    return Node.create(nodesField.getValue(getAddress()).getAddressAt(i * (int)VM.getVM().getAddressSize()));
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/Node_List.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class Node_List extends Node_Array {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("Node_List");
+    cntField = new CIntField(type.getCIntegerField("_cnt"), 0);
+  }
+
+  private static CIntField cntField;
+
+  public Node_List(Address addr) {
+    super(addr);
+  }
+
+  public int size() {
+    return (int) cntField.getValue(getAddress());
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/Phase.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class Phase extends VMObject {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("Phase");
+  }
+
+
+  public Phase(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/PhaseCFG.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.util.*;
+import java.io.PrintStream;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class PhaseCFG extends Phase {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("PhaseCFG");
+    numBlocksField = new CIntField(type.getCIntegerField("_num_blocks"), 0);
+    blocksField = type.getAddressField("_blocks");
+    bbsField = type.getAddressField("_bbs");
+    brootField = type.getAddressField("_broot");
+  }
+
+  private static CIntField numBlocksField;
+  private static AddressField blocksField;
+  private static AddressField bbsField;
+  private static AddressField brootField;
+
+  public PhaseCFG(Address addr) {
+    super(addr);
+  }
+
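+  // _blocks is an embedded Block_List (hence addOffsetTo rather than a
+  // pointer read); dump every scheduled basic block in order.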
+  public void dump(PrintStream out) {
+    int numBlocks = (int)numBlocksField.getValue(getAddress());
+    Block_List blocks = new Block_List(getAddress().addOffsetTo(blocksField.getOffset()));
+    for (int i = 0; i < numBlocks; i++) {
+      blocks.at(i).dump(out);
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/PhaseRegAlloc.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class PhaseRegAlloc extends Phase {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("PhaseRegAlloc");
+    nodeRegsField = type.getAddressField("_node_regs");
+    nodeRegsMaxIndexField = new CIntField(type.getCIntegerField("_node_regs_max_index"), 0);
+    framesizeField = new CIntField(type.getCIntegerField("_framesize"), 0);
+    maxRegField = new CIntField(type.getCIntegerField("_max_reg"), 0);
+  }
+
+  private static AddressField nodeRegsField;
+  private static CIntField nodeRegsMaxIndexField;
+  private static CIntField framesizeField;
+  private static CIntField maxRegField;
+
+  public PhaseRegAlloc(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/PhiNode.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class PhiNode extends TypeNode {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("PhiNode");
+  }
+
+
+  public PhiNode(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/ProjNode.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class ProjNode extends Node {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("ProjNode");
+  }
+
+
+  public ProjNode(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/RegionNode.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class RegionNode extends Node {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("RegionNode");
+  }
+
+
+  public RegionNode(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/RootNode.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class RootNode extends LoopNode {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("RootNode");
+  }
+
+
+  public RootNode(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/SafePointNode.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.io.PrintStream;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class SafePointNode extends MultiNode {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("SafePointNode");
+    jvmsField = type.getAddressField("_jvms");
+  }
+
+  private static AddressField jvmsField;
+
+  public SafePointNode(Address addr) {
+    super(addr);
+  }
+
+  public JVMState jvms() {
+    return JVMState.create(jvmsField.getValue(getAddress()));
+  }
+
+  public void dumpSpec(PrintStream out) {
+    JVMState jvms = jvms();
+    if (jvms != null) out.print(" !");
+    while (jvms != null) {
+      Method m = jvms.method().method();
+      int bci = jvms.bci();
+      out.print(" " + m.getMethodHolder().getName().asString().replace('/', '.') + "::" + m.getName().asString() + " @ bci:" + bci);
+      jvms = jvms.caller();
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/TypeNode.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class TypeNode extends Node {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("TypeNode");
+  }
+
+
+  public TypeNode(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/prims/JvmtiExport.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.prims;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class JvmtiExport {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("JvmtiExport");
+    // XXX
+    // canAccessLocalVariables =    type.getCIntegerField("_can_access_local_variables");
+    // canHotswapOrPostBreakpoint = type.getCIntegerField("_can_hotswap_or_post_breakpoint");
+    // canPostOnExceptions =        type.getCIntegerField("_can_post_on_exceptions");
+  }
+
+  private static CIntegerField canAccessLocalVariables;
+  private static CIntegerField canHotswapOrPostBreakpoint;
+  private static CIntegerField canPostOnExceptions;
+
+  public static boolean canAccessLocalVariables() {
+    // return canAccessLocalVariables.getValue() != 0;
+    return false;
+  }
+  public static boolean canHotswapOrPostBreakpoint() {
+    // return canHotswapOrPostBreakpoint.getValue() != 0;
+    return false;
+  }
+  public static boolean canPostOnExceptions() {
+    // return canPostOnExceptions.getValue() != 0;
+    return false;
+  }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/CompiledVFrame.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/CompiledVFrame.java	Thu Dec 22 15:46:11 2011 +0000
@@ -93,6 +93,8 @@
   }
 
   public StackValueCollection getLocals() {
+    if (getScope() == null)
+      return new StackValueCollection();
     List scvList = getScope().getLocals();
     if (scvList == null)
       return new StackValueCollection();
@@ -108,6 +110,8 @@
   }
 
   public StackValueCollection getExpressions() {
+    if (getScope() == null)
+      return new StackValueCollection();
     List scvList = getScope().getExpressions();
     if (scvList == null)
       return new StackValueCollection();
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/CompilerThread.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/CompilerThread.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,10 +25,40 @@
 package sun.jvm.hotspot.runtime;
 
 import java.io.*;
+import java.util.*;
 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.ci.*;
 
 public class CompilerThread extends JavaThread {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static AddressField _env_field;
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("CompilerThread");
+
+    _env_field = type.getAddressField("_env");
+  }
+
+  private ciEnv _env;
+
+  public synchronized ciEnv env() {
+    if (_env == null) {
+      Address v = _env_field.getValue(this.getAddress());
+      if (v != null) {
+        _env = new ciEnv(v);
+      }
+    }
+    return _env;
+  }
+
   public CompilerThread(Address addr) {
     super(addr);
   }
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/Frame.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/Frame.java	Thu Dec 22 15:46:11 2011 +0000
@@ -33,6 +33,7 @@
 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.interpreter.*;
 import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.runtime.sparc.SPARCFrame;
 import sun.jvm.hotspot.types.*;
 import sun.jvm.hotspot.utilities.*;
 
@@ -74,11 +75,19 @@
   /** Size of constMethodOopDesc for computing BCI from BCP (FIXME: hack) */
   private static long    constMethodOopDescSize;
 
+  private static int pcReturnOffset;
+
+  public static int pcReturnOffset() {
+    return pcReturnOffset;
+  }
+
   private static synchronized void initialize(TypeDataBase db) {
     Type constMethodOopType = db.lookupType("constMethodOopDesc");
     // FIXME: not sure whether alignment here is correct or how to
     // force it (round up to address size?)
     constMethodOopDescSize = constMethodOopType.getSize();
+
+    pcReturnOffset = db.lookupIntConstant("frame::pc_return_offset").intValue();
   }
 
   protected int bcpToBci(Address bcp, ConstMethod cm) {
@@ -106,6 +115,10 @@
   public void    setPC(Address newpc) { pc = newpc; }
   public boolean isDeoptimized()      { return deoptimized; }
 
+  public CodeBlob cb() {
+    return VM.getVM().getCodeCache().findBlob(getPC());
+  }
+
   public abstract Address getSP();
   public abstract Address getID();
   public abstract Address getFP();
@@ -134,6 +147,12 @@
     }
   }
 
+  public boolean isRicochetFrame() {
+    CodeBlob cb = VM.getVM().getCodeCache().findBlob(getPC());
+    RicochetBlob rcb = VM.getVM().ricochetBlob();
+    return (rcb != null && cb == rcb && rcb.returnsToBounceAddr(getPC()));
+  }
+
   public boolean isCompiledFrame() {
     if (Assert.ASSERTS_ENABLED) {
       Assert.that(!VM.getVM().isCore(), "noncore builds only");
@@ -142,7 +161,7 @@
     return (cb != null && cb.isJavaMethod());
   }
 
-  public boolean isGlueFrame() {
+  public boolean isRuntimeFrame() {
     if (Assert.ASSERTS_ENABLED) {
       Assert.that(!VM.getVM().isCore(), "noncore builds only");
     }
@@ -197,7 +216,8 @@
   public Frame realSender(RegisterMap map) {
     if (!VM.getVM().isCore()) {
       Frame result = sender(map);
-      while (result.isGlueFrame()) {
+      while (result.isRuntimeFrame() ||
+             result.isRicochetFrame()) {
         result = result.sender(map);
       }
       return result;
@@ -611,6 +631,9 @@
     if (Assert.ASSERTS_ENABLED) {
       Assert.that(cb != null, "sanity check");
     }
+    if (cb == VM.getVM().ricochetBlob()) {
+      oopsRicochetDo(oopVisitor, regMap);
+    }
     if (cb.getOopMaps() != null) {
       OopMapSet.oopsDo(this, cb, regMap, oopVisitor, VM.getVM().isDebugging());
 
@@ -627,6 +650,10 @@
     //    }
   }
 
+  private void oopsRicochetDo      (AddressVisitor oopVisitor, RegisterMap regMap) {
+    // XXX Empty for now
+  }
+
   // FIXME: implement the above routines, plus add
   // oops_interpreted_arguments_do and oops_compiled_arguments_do
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/InstanceConstructor.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.runtime;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.debugger.cdbg.CDebugger;
+import sun.jvm.hotspot.debugger.cdbg.ClosestSymbol;
+import sun.jvm.hotspot.debugger.cdbg.LoadObject;
+import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.HotSpotTypeDataBase;
+
+/** Given a pointer to some memory, return an appropriate wrapper.
+ Various subclasses provide different mechanisms for identifying the
+ appropriate wrapper. */
+
+abstract public class InstanceConstructor<T> {
+  /** Instantiate the most-precisely typed wrapper object available
+      for the type of the given Address. If no type in the mapping
+      matched the type of the Address, throws a WrongTypeException.
+      Returns null for a null address (similar behavior to
+      VMObjectFactory). */
+  abstract public T instantiateWrapperFor(Address addr) throws WrongTypeException;
+
+  protected WrongTypeException newWrongTypeException(Address addr) {
+    String message = "No suitable match for type of address " + addr;
+    CDebugger cdbg = VM.getVM().getDebugger().getCDebugger();
+    if (cdbg != null) {
+      // Most common case: V-table pointer is the first field
+      Address vtblPtr = addr.getAddressAt(0);
+      LoadObject lo = cdbg.loadObjectContainingPC(vtblPtr);
+      if (lo != null) {
+        ClosestSymbol symbol = lo.closestSymbolToPC(vtblPtr);
+        if (symbol != null) {
+          message += " (nearest symbol is " + symbol.getName() + ")";
+        }
+      }
+    }
+
+    return new WrongTypeException(message);
+  }
+}
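+
+// A minimal usage sketch (identifiers here are assumptions for illustration,
+// not part of this class): a concrete subclass supplies the identification
+// strategy, and callers treat the result uniformly:
+//
+//   InstanceConstructor<VMObject> ctor = ...;            // e.g. a VirtualConstructor
+//   VMObject wrapper = ctor.instantiateWrapperFor(addr); // null if addr is null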
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/JavaVFrame.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/JavaVFrame.java	Thu Dec 22 15:46:11 2011 +0000
@@ -128,14 +128,14 @@
       }
 
       // dynamic part - we just compare the frame pointer
-      if (! getFrame().getFP().equals(other.getFrame().getFP())) {
+      if (! getFrame().equals(other.getFrame())) {
           return false;
       }
       return true;
   }
 
   public int hashCode() {
-      return getMethod().hashCode() ^ getBCI() ^ getFrame().getFP().hashCode();
+      return getMethod().hashCode() ^ getBCI() ^ getFrame().hashCode();
   }
 
   /** Structural compare */
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/StackValue.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/StackValue.java	Thu Dec 22 15:46:11 2011 +0000
@@ -100,7 +100,7 @@
 
   public int hashCode() {
     if (type == BasicType.getTObject()) {
-      return handleValue.hashCode();
+      return handleValue != null ? handleValue.hashCode() : 5;
     } else {
       // Returns 0 for conflict type
       return (int) integerValue;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/StaticBaseConstructor.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.runtime;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.debugger.cdbg.CDebugger;
+import sun.jvm.hotspot.debugger.cdbg.ClosestSymbol;
+import sun.jvm.hotspot.debugger.cdbg.LoadObject;
+import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.HotSpotTypeDataBase;
+
+/** Instantiate wrappers for statically typed instances. */
+
+public class StaticBaseConstructor<T> extends InstanceConstructor {
+  private Class staticType;
+
+  public StaticBaseConstructor(Class<T> t) {
+    staticType = t;
+  }
+
+  /** Instantiate a wrapper using staticType */
+  public VMObject instantiateWrapperFor(Address addr) throws WrongTypeException {
+    if (addr == null) {
+      return null;
+    }
+
+    return (VMObject) VMObjectFactory.newObject(staticType, addr);
+  }
+}
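+
+// Sketch of intended use (PhaseCFG is just an example of a statically known
+// wrapper type; addr is a hypothetical Address):
+//
+//   StaticBaseConstructor<PhaseCFG> ctor =
+//       new StaticBaseConstructor<PhaseCFG>(PhaseCFG.class);
+//   PhaseCFG cfg = (PhaseCFG) ctor.instantiateWrapperFor(addr);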
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/VFrame.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/VFrame.java	Thu Dec 22 15:46:11 2011 +0000
@@ -77,7 +77,7 @@
           return new CompiledVFrame(f, regMap, thread, scope, mayBeImprecise);
         }
 
-        if (f.isGlueFrame()) {
+        if (f.isRuntimeFrame()) {
           // This is a conversion frame. Skip this frame and try again.
           RegisterMap tempMap = regMap.copy();
           Frame s = f.sender(tempMap);
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Thu Dec 22 15:46:11 2011 +0000
@@ -30,6 +30,7 @@
 import java.util.regex.*;
 import sun.jvm.hotspot.code.*;
 import sun.jvm.hotspot.c1.*;
+import sun.jvm.hotspot.code.*;
 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.interpreter.*;
 import sun.jvm.hotspot.memory.*;
@@ -85,6 +86,9 @@
   private Interpreter  interpreter;
   private StubRoutines stubRoutines;
   private Bytes        bytes;
+
+  private RicochetBlob ricochetBlob;
+
   /** Flags indicating whether we are attached to a core, C1, or C2 build */
   private boolean      usingClientCompiler;
   private boolean      usingServerCompiler;
@@ -131,12 +135,14 @@
      private String name;
      private Address addr;
      private String kind;
+     private int origin;
 
-     private Flag(String type, String name, Address addr, String kind) {
+     private Flag(String type, String name, Address addr, String kind, int origin) {
         this.type = type;
         this.name = name;
         this.addr = addr;
         this.kind = kind;
+        this.origin = origin;
      }
 
      public String getType() {
@@ -155,6 +161,10 @@
         return kind;
      }
 
+     public int getOrigin() {
+        return origin;
+     }
+
      public boolean isBool() {
         return type.equals("bool");
      }
@@ -300,7 +310,7 @@
         usingServerCompiler = false;
       } else {
         // Determine whether C2 is present
-        if (type.getField("_interpreter_invocation_count", false, false) != null) {
+        if (db.lookupType("Matcher", false) != null) {
           usingServerCompiler = true;
         } else {
           usingClientCompiler = true;
@@ -618,6 +628,18 @@
     return stubRoutines;
   }
 
+  public RicochetBlob ricochetBlob() {
+    if (ricochetBlob == null) {
+      Type ricochetType  = db.lookupType("SharedRuntime");
+      AddressField ricochetBlobAddress = ricochetType.getAddressField("_ricochet_blob");
+      Address addr = ricochetBlobAddress.getValue();
+      if (addr != null) {
+        ricochetBlob = new RicochetBlob(addr);
+      }
+    }
+    return ricochetBlob;
+  }
+
   public VMRegImpl getVMRegImplInfo() {
     if (vmregImpl == null) {
       vmregImpl = new VMRegImpl();
@@ -788,42 +810,40 @@
   private void readCommandLineFlags() {
     // get command line flags
     TypeDataBase db = getTypeDataBase();
-    try {
-       Type flagType = db.lookupType("Flag");
-       int numFlags = (int) flagType.getCIntegerField("numFlags").getValue();
-       // NOTE: last flag contains null values.
-       commandLineFlags = new Flag[numFlags - 1];
+    Type flagType = db.lookupType("Flag");
+    int numFlags = (int) flagType.getCIntegerField("numFlags").getValue();
+    // NOTE: last flag contains null values.
+    commandLineFlags = new Flag[numFlags - 1];
+
+    Address flagAddr = flagType.getAddressField("flags").getValue();
 
-       Address flagAddr = flagType.getAddressField("flags").getValue();
+    AddressField typeFld = flagType.getAddressField("type");
+    AddressField nameFld = flagType.getAddressField("name");
+    AddressField addrFld = flagType.getAddressField("addr");
+    AddressField kindFld = flagType.getAddressField("kind");
+    CIntField originFld = new CIntField(flagType.getCIntegerField("origin"), 0);
 
-       AddressField typeFld = flagType.getAddressField("type");
-       AddressField nameFld = flagType.getAddressField("name");
-       AddressField addrFld = flagType.getAddressField("addr");
-       AddressField kindFld = flagType.getAddressField("kind");
-
-       long flagSize = flagType.getSize(); // sizeof(Flag)
+    long flagSize = flagType.getSize(); // sizeof(Flag)
 
-       // NOTE: last flag contains null values.
-       for (int f = 0; f < numFlags - 1; f++) {
-          String type = CStringUtilities.getString(typeFld.getValue(flagAddr));
-          String name = CStringUtilities.getString(nameFld.getValue(flagAddr));
-          Address addr = addrFld.getValue(flagAddr);
-          String kind = CStringUtilities.getString(kindFld.getValue(flagAddr));
-          commandLineFlags[f] = new Flag(type, name, addr, kind);
-          flagAddr = flagAddr.addOffsetTo(flagSize);
-       }
+    // NOTE: last flag contains null values.
+    for (int f = 0; f < numFlags - 1; f++) {
+      String type = CStringUtilities.getString(typeFld.getValue(flagAddr));
+      String name = CStringUtilities.getString(nameFld.getValue(flagAddr));
+      Address addr = addrFld.getValue(flagAddr);
+      String kind = CStringUtilities.getString(kindFld.getValue(flagAddr));
+      int origin = (int)originFld.getValue(flagAddr);
+      commandLineFlags[f] = new Flag(type, name, addr, kind, origin);
+      flagAddr = flagAddr.addOffsetTo(flagSize);
+    }
 
-       // sort flags by name
-       Arrays.sort(commandLineFlags, new Comparator() {
-                                        public int compare(Object o1, Object o2) {
-                                           Flag f1 = (Flag) o1;
-                                           Flag f2 = (Flag) o2;
-                                           return f1.getName().compareTo(f2.getName());
-                                        }
-                                     });
-    } catch (Exception exp) {
-       // ignore. may be older version. command line flags not available.
-    }
+    // sort flags by name
+    Arrays.sort(commandLineFlags, new Comparator() {
+        public int compare(Object o1, Object o2) {
+          Flag f1 = (Flag) o1;
+          Flag f2 = (Flag) o2;
+          return f1.getName().compareTo(f2.getName());
+        }
+      });
   }
 
   public String getSystemProperty(String key) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/VirtualBaseConstructor.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.runtime;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.debugger.cdbg.CDebugger;
+import sun.jvm.hotspot.debugger.cdbg.ClosestSymbol;
+import sun.jvm.hotspot.debugger.cdbg.LoadObject;
+import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.HotSpotTypeDataBase;
+
+/** This provides a factory to create instances where the base virtual
+ * type is known and the expected subclasses are within a particular
+ * package. */
+
+public class VirtualBaseConstructor<T> extends InstanceConstructor {
+  private TypeDataBase db;
+  private HashMap      map; // Map<String, Class>
+  private Type         baseType;
+  private Class        unknownTypeHandler;
+
+  public VirtualBaseConstructor(TypeDataBase db, Type baseType, String packageName, Class unknownTypeHandler) {
+    this.db = (HotSpotTypeDataBase)db;
+    map     = new HashMap();
+    this.baseType = baseType;
+    this.unknownTypeHandler = unknownTypeHandler;
+    // Try to find mirror types for each of the types.  If there isn't
+    // a direct mirror then try to find an instantiable superclass and
+    // treat it as that.
+    for (Iterator iter = db.getTypes(); iter.hasNext(); ) {
+      Type t = (Type) iter.next();
+      Type superType = t;
+      while (superType != null && superType != baseType) {
+        superType = superType.getSuperclass();
+      }
+      if (superType == baseType) {
+        superType = t;
+        Class c = null;
+        while (c == null && superType != null) {
+          try {
+            c = Class.forName(packageName + "." + superType.getName());
+          } catch (Exception e) {   // no mirror class with this name; try a superclass
+          }
+          if (c == null) superType = superType.getSuperclass();
+        }
+        if (c == null) {
+          c = unknownTypeHandler;
+        }
+        map.put(t.getName(), c);
+      }
+    }
+  }
+
+  /** Instantiate the most-precisely typed wrapper object available
+      for the type of the given Address. If no type in the mapping
+      matched the type of the Address, throws a WrongTypeException.
+      Returns null for a null address (similar behavior to
+      VMObjectFactory). */
+  public VMObject instantiateWrapperFor(Address addr) throws WrongTypeException {
+    if (addr == null) {
+      return null;
+    }
+
+    Type type = db.findDynamicTypeForAddress(addr, baseType);
+    if (type != null) {
+        return (VMObject) VMObjectFactory.newObject((Class) map.get(type.getName()), addr);
+    } else if (unknownTypeHandler != null) {
+        return (VMObject) VMObjectFactory.newObject(unknownTypeHandler, addr);
+    }
+
+    throw newWrongTypeException(addr);
+  }
+}
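+
+// Construction sketch, assuming the caller wants wrappers for the C2 "Node"
+// hierarchy (the variable names are illustrative; the constructor arguments
+// mirror the signature above):
+//
+//   TypeDataBase db = VM.getVM().getTypeDataBase();
+//   VirtualBaseConstructor<Node> nodes =
+//       new VirtualBaseConstructor<Node>(db, db.lookupType("Node"),
+//                                        "sun.jvm.hotspot.opto", Node.class);
+//   Node n = (Node) nodes.instantiateWrapperFor(nodeAddr);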
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/VirtualConstructor.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/VirtualConstructor.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
 
 import java.util.*;
 import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.cdbg.*;
 import sun.jvm.hotspot.types.*;
 
 /** This class provides generalized "virtual constructor"
@@ -39,7 +38,7 @@
     type "DefNewGeneration" to class
     sun.jvm.hotspot.memory.DefNewGeneration has been set up. */
 
-public class VirtualConstructor {
+public class VirtualConstructor extends InstanceConstructor<VMObject> {
   private TypeDataBase db;
   private Map          map; // Map<String, Class>
 
@@ -78,20 +77,6 @@
       }
     }
 
-    String message = "No suitable match for type of address " + addr;
-    CDebugger cdbg = VM.getVM().getDebugger().getCDebugger();
-    if (cdbg != null) {
-      // Most common case: V-table pointer is the first field
-      Address vtblPtr = addr.getAddressAt(0);
-      LoadObject lo = cdbg.loadObjectContainingPC(vtblPtr);
-      if (lo != null) {
-        ClosestSymbol symbol = lo.closestSymbolToPC(vtblPtr);
-        if (symbol != null) {
-          message += " (nearest symbol is " + symbol.getName() + ")";
-        }
-      }
-    }
-
-    throw new WrongTypeException(message);
+    throw newWrongTypeException(addr);
   }
 }
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/amd64/AMD64CurrentFrameGuess.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/amd64/AMD64CurrentFrameGuess.java	Thu Dec 22 15:46:11 2011 +0000
@@ -29,6 +29,7 @@
 import sun.jvm.hotspot.code.*;
 import sun.jvm.hotspot.interpreter.*;
 import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.runtime.x86.*;
 
 /** <P> Should be able to be used on all amd64 platforms we support
     (Linux/amd64) to implement JavaThread's
@@ -123,7 +124,7 @@
              offset += vm.getAddressSize()) {
           try {
             Address curSP = sp.addOffsetTo(offset);
-            Frame frame = new AMD64Frame(curSP, null, pc);
+            Frame frame = new X86Frame(curSP, null, pc);
             RegisterMap map = thread.newRegisterMap(false);
             while (frame != null) {
               if (frame.isEntryFrame() && frame.entryFrameIsFirst()) {
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/amd64/AMD64Frame.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,528 +0,0 @@
-/*
- * Copyright (c) 2003, 2006, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.runtime.amd64;
-
-import java.util.*;
-import sun.jvm.hotspot.code.*;
-import sun.jvm.hotspot.compiler.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.oops.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-import sun.jvm.hotspot.utilities.*;
-
-/** Specialization of and implementation of abstract methods of the
-    Frame class for the amd64 CPU. */
-
-public class AMD64Frame extends Frame {
-  private static final boolean DEBUG;
-  static {
-    DEBUG = System.getProperty("sun.jvm.hotspot.runtime.amd64.AMD64Frame.DEBUG") != null;
-  }
-
-  // refer to frame_amd64.hpp
-  private static final int PC_RETURN_OFFSET           =  0;
-  // All frames
-  private static final int LINK_OFFSET                =  0;
-  private static final int RETURN_ADDR_OFFSET         =  1;
-  private static final int SENDER_SP_OFFSET           =  2;
-
-  // Interpreter frames
-  private static final int INTERPRETER_FRAME_MIRROR_OFFSET    =  2; // for native calls only
-  private static final int INTERPRETER_FRAME_SENDER_SP_OFFSET = -1;
-  private static final int INTERPRETER_FRAME_LAST_SP_OFFSET   = INTERPRETER_FRAME_SENDER_SP_OFFSET - 1;
-  private static final int INTERPRETER_FRAME_METHOD_OFFSET    = INTERPRETER_FRAME_LAST_SP_OFFSET - 1;
-  private static       int INTERPRETER_FRAME_MDX_OFFSET;         // Non-core builds only
-  private static       int INTERPRETER_FRAME_CACHE_OFFSET;
-  private static       int INTERPRETER_FRAME_LOCALS_OFFSET;
-  private static       int INTERPRETER_FRAME_BCX_OFFSET;
-  private static       int INTERPRETER_FRAME_INITIAL_SP_OFFSET;
-  private static       int INTERPRETER_FRAME_MONITOR_BLOCK_TOP_OFFSET;
-  private static       int INTERPRETER_FRAME_MONITOR_BLOCK_BOTTOM_OFFSET;
-
-  // Entry frames
-  private static final int ENTRY_FRAME_CALL_WRAPPER_OFFSET   =  -6;
-
-  // Native frames
-  private static final int NATIVE_FRAME_INITIAL_PARAM_OFFSET =  2;
-
-  static {
-    VM.registerVMInitializedObserver(new Observer() {
-        public void update(Observable o, Object data) {
-          initialize(VM.getVM().getTypeDataBase());
-        }
-      });
-  }
-
-  private static synchronized void initialize(TypeDataBase db) {
-    if (VM.getVM().isCore()) {
-      INTERPRETER_FRAME_CACHE_OFFSET = INTERPRETER_FRAME_METHOD_OFFSET - 1;
-    } else {
-      INTERPRETER_FRAME_MDX_OFFSET   = INTERPRETER_FRAME_METHOD_OFFSET - 1;
-      INTERPRETER_FRAME_CACHE_OFFSET = INTERPRETER_FRAME_MDX_OFFSET - 1;
-    }
-    INTERPRETER_FRAME_LOCALS_OFFSET               = INTERPRETER_FRAME_CACHE_OFFSET - 1;
-    INTERPRETER_FRAME_BCX_OFFSET                  = INTERPRETER_FRAME_LOCALS_OFFSET - 1;
-    INTERPRETER_FRAME_INITIAL_SP_OFFSET           = INTERPRETER_FRAME_BCX_OFFSET - 1;
-    INTERPRETER_FRAME_MONITOR_BLOCK_TOP_OFFSET    = INTERPRETER_FRAME_INITIAL_SP_OFFSET;
-    INTERPRETER_FRAME_MONITOR_BLOCK_BOTTOM_OFFSET = INTERPRETER_FRAME_INITIAL_SP_OFFSET;
-  }
-
-  // an additional field beyond sp and pc:
-  Address raw_fp; // frame pointer
-  private Address raw_unextendedSP;
-
-  private AMD64Frame() {
-  }
-
-  private void adjustForDeopt() {
-    if ( pc != null) {
-      // Look for a deopt pc and if it is deopted convert to original pc
-      CodeBlob cb = VM.getVM().getCodeCache().findBlob(pc);
-      if (cb != null && cb.isJavaMethod()) {
-        NMethod nm = (NMethod) cb;
-        if (pc.equals(nm.deoptBegin())) {
-          // adjust pc if frame is deoptimized.
-          if (Assert.ASSERTS_ENABLED) {
-            Assert.that(this.getUnextendedSP() != null, "null SP in Java frame");
-          }
-          pc = this.getUnextendedSP().getAddressAt(nm.origPCOffset());
-          deoptimized = true;
-        }
-      }
-    }
-  }
-
-  public AMD64Frame(Address raw_sp, Address raw_fp, Address pc) {
-    this.raw_sp = raw_sp;
-    this.raw_unextendedSP = raw_sp;
-    this.raw_fp = raw_fp;
-    this.pc = pc;
-
-    // Frame must be fully constructed before this call
-    adjustForDeopt();
-
-    if (DEBUG) {
-      System.out.println("AMD64Frame(sp, fp, pc): " + this);
-      dumpStack();
-    }
-  }
-
-  public AMD64Frame(Address raw_sp, Address raw_fp) {
-    this.raw_sp = raw_sp;
-    this.raw_unextendedSP = raw_sp;
-    this.raw_fp = raw_fp;
-    this.pc = raw_sp.getAddressAt(-1 * VM.getVM().getAddressSize());
-
-    // Frame must be fully constructed before this call
-    adjustForDeopt();
-
-    if (DEBUG) {
-      System.out.println("AMD64Frame(sp, fp): " + this);
-      dumpStack();
-    }
-  }
-
-  // This constructor should really take the unextended SP as an arg
-  // but then the constructor is ambiguous with constructor that takes
-  // a PC so take an int and convert it.
-  public AMD64Frame(Address raw_sp, Address raw_fp, long extension) {
-    this.raw_sp = raw_sp;
-    if ( raw_sp == null) {
-      this.raw_unextendedSP = null;
-    } else {
-      this.raw_unextendedSP = raw_sp.addOffsetTo(extension);
-    }
-    this.raw_fp = raw_fp;
-    this.pc = raw_sp.getAddressAt(-1 * VM.getVM().getAddressSize());
-
-    // Frame must be fully constructed before this call
-    adjustForDeopt();
-
-    if (DEBUG) {
-      System.out.println("AMD64Frame(sp, fp, extension): " + this);
-      dumpStack();
-    }
-
-  }
-
-  public Object clone() {
-    AMD64Frame frame = new AMD64Frame();
-    frame.raw_sp = raw_sp;
-    frame.raw_unextendedSP = raw_unextendedSP;
-    frame.raw_fp = raw_fp;
-    frame.pc = pc;
-    frame.deoptimized = deoptimized;
-    return frame;
-  }
-
-  public boolean equals(Object arg) {
-    if (arg == null) {
-      return false;
-    }
-
-    if (!(arg instanceof AMD64Frame)) {
-      return false;
-    }
-
-    AMD64Frame other = (AMD64Frame) arg;
-
-    return (AddressOps.equal(getSP(), other.getSP()) &&
-            AddressOps.equal(getFP(), other.getFP()) &&
-            AddressOps.equal(getUnextendedSP(), other.getUnextendedSP()) &&
-            AddressOps.equal(getPC(), other.getPC()));
-  }
-
-  public int hashCode() {
-    if (raw_sp == null) {
-      return 0;
-    }
-
-    return raw_sp.hashCode();
-  }
-
-  public String toString() {
-    return "sp: " + (getSP() == null? "null" : getSP().toString()) +
-         ", unextendedSP: " + (getUnextendedSP() == null? "null" : getUnextendedSP().toString()) +
-         ", fp: " + (getFP() == null? "null" : getFP().toString()) +
-         ", pc: " + (pc == null? "null" : pc.toString());
-  }
-
-  // accessors for the instance variables
-  public Address getFP() { return raw_fp; }
-  public Address getSP() { return raw_sp; }
-  public Address getID() { return raw_sp; }
-
-  // FIXME: not implemented yet (should be done for Solaris/AMD64)
-  public boolean isSignalHandlerFrameDbg() { return false; }
-  public int     getSignalNumberDbg()      { return 0;     }
-  public String  getSignalNameDbg()        { return null;  }
-
-  public boolean isInterpretedFrameValid() {
-    if (Assert.ASSERTS_ENABLED) {
-      Assert.that(isInterpretedFrame(), "Not an interpreted frame");
-    }
-
-    // These are reasonable sanity checks
-    if (getFP() == null || getFP().andWithMask(0x3) != null) {
-      return false;
-    }
-
-    if (getSP() == null || getSP().andWithMask(0x3) != null) {
-      return false;
-    }
-
-    if (getFP().addOffsetTo(INTERPRETER_FRAME_INITIAL_SP_OFFSET * VM.getVM().getAddressSize()).lessThan(getSP())) {
-      return false;
-    }
-
-    // These are hacks to keep us out of trouble.
-    // The problem with these is that they mask other problems
-    if (getFP().lessThanOrEqual(getSP())) {
-      // this attempts to deal with unsigned comparison above
-      return false;
-    }
-
-    if (getFP().minus(getSP()) > 4096 * VM.getVM().getAddressSize()) {
-      // stack frames shouldn't be large.
-      return false;
-    }
-
-    return true;
-  }
-
-  // FIXME: not applicable in current system
-  //  void    patch_pc(Thread* thread, address pc);
-
-  public Frame sender(RegisterMap regMap, CodeBlob cb) {
-    AMD64RegisterMap map = (AMD64RegisterMap) regMap;
-
-    if (Assert.ASSERTS_ENABLED) {
-      Assert.that(map != null, "map must be set");
-    }
-
-    // Default is we done have to follow them. The sender_for_xxx will
-    // update it accordingly
-    map.setIncludeArgumentOops(false);
-
-    if (isEntryFrame())       return senderForEntryFrame(map);
-    if (isInterpretedFrame()) return senderForInterpreterFrame(map);
-
-
-    if (!VM.getVM().isCore()) {
-      if(cb == null) {
-        cb = VM.getVM().getCodeCache().findBlob(getPC());
-      } else {
-        if (Assert.ASSERTS_ENABLED) {
-          Assert.that(cb.equals(VM.getVM().getCodeCache().findBlob(getPC())), "Must be the same");
-        }
-      }
-
-      if (cb != null) {
-         return senderForCompiledFrame(map, cb);
-      }
-    }
-
-    // Must be native-compiled frame, i.e. the marshaling code for native
-    // methods that exists in the core system.
-    return new AMD64Frame(getSenderSP(), getLink(), getSenderPC());
-  }
-
-  private Frame senderForEntryFrame(AMD64RegisterMap map) {
-    if (Assert.ASSERTS_ENABLED) {
-      Assert.that(map != null, "map must be set");
-    }
-    // Java frame called from C; skip all C frames and return top C
-    // frame of that chunk as the sender
-    AMD64JavaCallWrapper jcw = (AMD64JavaCallWrapper) getEntryFrameCallWrapper();
-    if (Assert.ASSERTS_ENABLED) {
-      Assert.that(!entryFrameIsFirst(), "next Java fp must be non zero");
-      Assert.that(jcw.getLastJavaSP().greaterThan(getSP()), "must be above this frame on stack");
-    }
-    AMD64Frame fr;
-    if (jcw.getLastJavaPC() != null) {
-      fr = new AMD64Frame(jcw.getLastJavaSP(), jcw.getLastJavaFP(), jcw.getLastJavaPC());
-    } else {
-      fr = new AMD64Frame(jcw.getLastJavaSP(), jcw.getLastJavaFP());
-    }
-    map.clear();
-    if (Assert.ASSERTS_ENABLED) {
-      Assert.that(map.getIncludeArgumentOops(), "should be set by clear");
-    }
-    return fr;
-  }
-
-  private Frame senderForInterpreterFrame(AMD64RegisterMap map) {
-    Address unextendedSP = addressOfStackSlot(INTERPRETER_FRAME_SENDER_SP_OFFSET).getAddressAt(0);
-    Address sp = addressOfStackSlot(SENDER_SP_OFFSET);
-    // We do not need to update the callee-save register mapping because above
-    // us is either another interpreter frame or a converter-frame, but never
-    // directly a compiled frame.
-    // 11/24/04 SFG. This is no longer true after adapter were removed. However at the moment
-    // C2 no longer uses callee save register for java calls so there are no callee register
-    // to find.
-    return new AMD64Frame(sp, getLink(), unextendedSP.minus(sp));
-  }
-
-  private Frame senderForCompiledFrame(AMD64RegisterMap map, CodeBlob cb) {
-    //
-    // NOTE: some of this code is (unfortunately) duplicated in AMD64CurrentFrameGuess
-    //
-
-    if (Assert.ASSERTS_ENABLED) {
-      Assert.that(map != null, "map must be set");
-    }
-
-    // frame owned by optimizing compiler
-    Address        sender_sp = null;
-
-
-    if (VM.getVM().isClientCompiler()) {
-      sender_sp        = addressOfStackSlot(SENDER_SP_OFFSET);
-    } else {
-      if (Assert.ASSERTS_ENABLED) {
-        Assert.that(cb.getFrameSize() >= 0, "Compiled by Compiler1: do not use");
-      }
-      sender_sp = getUnextendedSP().addOffsetTo(cb.getFrameSize());
-    }
-
-    // On Intel the return_address is always the word on the stack
-    Address sender_pc = sender_sp.getAddressAt(-1 * VM.getVM().getAddressSize());
-
-    if (map.getUpdateMap() && cb.getOopMaps() != null) {
-      OopMapSet.updateRegisterMap(this, cb, map, true);
-    }
-
-    if (VM.getVM().isClientCompiler()) {
-      // Move this here for C1 and collecting oops in arguments (According to Rene)
-      map.setIncludeArgumentOops(cb.callerMustGCArguments(map.getThread()));
-    }
-
-    Address saved_fp = null;
-    if (VM.getVM().isClientCompiler()) {
-      saved_fp = getFP().getAddressAt(0);
-    } else if (VM.getVM().isServerCompiler() &&
-               (VM.getVM().getInterpreter().contains(sender_pc) ||
-               VM.getVM().getStubRoutines().returnsToCallStub(sender_pc))) {
-      // C2 prologue saves EBP in the usual place.
-      // however only use it if the sender had link infomration in it.
-      saved_fp = sender_sp.getAddressAt(-2 * VM.getVM().getAddressSize());
-    }
-
-    return new AMD64Frame(sender_sp, saved_fp, sender_pc);
-  }
-
-  protected boolean hasSenderPD() {
-    // FIXME
-    // Check for null ebp? Need to do some tests.
-    return true;
-  }
-
-  public long frameSize() {
-    return (getSenderSP().minus(getSP()) / VM.getVM().getAddressSize());
-  }
-
-  public Address getLink() {
-    return addressOfStackSlot(LINK_OFFSET).getAddressAt(0);
-  }
-
-  // FIXME: not implementable yet
-  //inline void      frame::set_link(intptr_t* addr)  { *(intptr_t **)addr_at(link_offset) = addr; }
-
-  public Address getUnextendedSP() { return raw_unextendedSP; }
-
-  // Return address:
-  public Address getSenderPCAddr() { return addressOfStackSlot(RETURN_ADDR_OFFSET); }
-  public Address getSenderPC()     { return getSenderPCAddr().getAddressAt(0);      }
-
-  // return address of param, zero origin index.
-  public Address getNativeParamAddr(int idx) {
-    return addressOfStackSlot(NATIVE_FRAME_INITIAL_PARAM_OFFSET + idx);
-  }
-
-  public Address getSenderSP()     { return addressOfStackSlot(SENDER_SP_OFFSET); }
-
-  public Address compiledArgumentToLocationPD(VMReg reg, RegisterMap regMap, int argSize) {
-    if (VM.getVM().isCore() || VM.getVM().isClientCompiler()) {
-      throw new RuntimeException("Should not reach here");
-    }
-
-    return oopMapRegToLocation(reg, regMap);
-  }
-
-  public Address addressOfInterpreterFrameLocals() {
-    return addressOfStackSlot(INTERPRETER_FRAME_LOCALS_OFFSET);
-  }
-
-  private Address addressOfInterpreterFrameBCX() {
-    return addressOfStackSlot(INTERPRETER_FRAME_BCX_OFFSET);
-  }
-
-  public int getInterpreterFrameBCI() {
-    // FIXME: this is not atomic with respect to GC and is unsuitable
-    // for use in a non-debugging, or reflective, system. Need to
-    // figure out how to express this.
-    Address bcp = addressOfInterpreterFrameBCX().getAddressAt(0);
-    OopHandle methodHandle = addressOfInterpreterFrameMethod().getOopHandleAt(0);
-    Method method = (Method) VM.getVM().getObjectHeap().newOop(methodHandle);
-    return (int) bcpToBci(bcp, method);
-  }
-
-  public Address addressOfInterpreterFrameMDX() {
-    return addressOfStackSlot(INTERPRETER_FRAME_MDX_OFFSET);
-  }
-
-  // FIXME
-  //inline int frame::interpreter_frame_monitor_size() {
-  //  return BasicObjectLock::size();
-  //}
-
-  // expression stack
-  // (the max_stack arguments are used by the GC; see class FrameClosure)
-
-  public Address addressOfInterpreterFrameExpressionStack() {
-    Address monitorEnd = interpreterFrameMonitorEnd().address();
-    return monitorEnd.addOffsetTo(-1 * VM.getVM().getAddressSize());
-  }
-
-  public int getInterpreterFrameExpressionStackDirection() { return -1; }
-
-  // top of expression stack
-  public Address addressOfInterpreterFrameTOS() {
-    return getSP();
-  }
-
-  /** Expression stack from top down */
-  public Address addressOfInterpreterFrameTOSAt(int slot) {
-    return addressOfInterpreterFrameTOS().addOffsetTo(slot * VM.getVM().getAddressSize());
-  }
-
-  public Address getInterpreterFrameSenderSP() {
-    if (Assert.ASSERTS_ENABLED) {
-      Assert.that(isInterpretedFrame(), "interpreted frame expected");
-    }
-    return addressOfStackSlot(INTERPRETER_FRAME_SENDER_SP_OFFSET).getAddressAt(0);
-  }
-
-  // Monitors
-  public BasicObjectLock interpreterFrameMonitorBegin() {
-    return new BasicObjectLock(addressOfStackSlot(INTERPRETER_FRAME_MONITOR_BLOCK_BOTTOM_OFFSET));
-  }
-
-  public BasicObjectLock interpreterFrameMonitorEnd() {
-    Address result = addressOfStackSlot(INTERPRETER_FRAME_MONITOR_BLOCK_TOP_OFFSET).getAddressAt(0);
-    if (Assert.ASSERTS_ENABLED) {
-      // make sure the pointer points inside the frame
-      Assert.that(AddressOps.gt(getFP(), result), "result must <  than frame pointer");
-      Assert.that(AddressOps.lte(getSP(), result), "result must >= than stack pointer");
-    }
-    return new BasicObjectLock(result);
-  }
-
-  public int interpreterFrameMonitorSize() {
-    return BasicObjectLock.size();
-  }
-
-  // Method
-  public Address addressOfInterpreterFrameMethod() {
-    return addressOfStackSlot(INTERPRETER_FRAME_METHOD_OFFSET);
-  }
-
-  // Constant pool cache
-  public Address addressOfInterpreterFrameCPCache() {
-    return addressOfStackSlot(INTERPRETER_FRAME_CACHE_OFFSET);
-  }
-
-  // Entry frames
-  public JavaCallWrapper getEntryFrameCallWrapper() {
-    return new AMD64JavaCallWrapper(addressOfStackSlot(ENTRY_FRAME_CALL_WRAPPER_OFFSET).getAddressAt(0));
-  }
-
-  protected Address addressOfSavedOopResult() {
-    // offset is 2 for compiler2 and 3 for compiler1
-    return getSP().addOffsetTo((VM.getVM().isClientCompiler() ? 2 : 3) *
-                               VM.getVM().getAddressSize());
-  }
-
-  protected Address addressOfSavedReceiver() {
-    return getSP().addOffsetTo(-4 * VM.getVM().getAddressSize());
-  }
-
-  private void dumpStack() {
-    if (getFP() != null) {
-      for (Address addr = getSP().addOffsetTo(-5 * VM.getVM().getAddressSize());
-           AddressOps.lte(addr, getFP().addOffsetTo(5 * VM.getVM().getAddressSize()));
-           addr = addr.addOffsetTo(VM.getVM().getAddressSize())) {
-        System.out.println(addr + ": " + addr.getAddressAt(0));
-      }
-    } else {
-      for (Address addr = getSP().addOffsetTo(-5 * VM.getVM().getAddressSize());
-           AddressOps.lte(addr, getSP().addOffsetTo(20 * VM.getVM().getAddressSize()));
-           addr = addr.addOffsetTo(VM.getVM().getAddressSize())) {
-        System.out.println(addr + ": " + addr.getAddressAt(0));
-      }
-    }
-  }
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/amd64/AMD64RegisterMap.java	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2003, 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.runtime.amd64;
-
-import sun.jvm.hotspot.asm.amd64.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.runtime.*;
-
-public class AMD64RegisterMap extends RegisterMap {
-
-  /** This is the only public constructor */
-  public AMD64RegisterMap(JavaThread thread, boolean updateMap) {
-    super(thread, updateMap);
-  }
-
-  protected AMD64RegisterMap(RegisterMap map) {
-    super(map);
-  }
-
-  public Object clone() {
-    AMD64RegisterMap retval = new AMD64RegisterMap(this);
-    return retval;
-  }
-
-  // no PD state to clear or copy:
-  protected void clearPD() {}
-  protected void initializePD() {}
-  protected void initializeFromPD(RegisterMap map) {}
-  protected Address getLocationPD(VMReg reg) { return null; }
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/linux_amd64/LinuxAMD64JavaThreadPDAccess.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/linux_amd64/LinuxAMD64JavaThreadPDAccess.java	Thu Dec 22 15:46:11 2011 +0000
@@ -30,6 +30,7 @@
 import sun.jvm.hotspot.debugger.amd64.*;
 import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.runtime.amd64.*;
+import sun.jvm.hotspot.runtime.x86.*;
 import sun.jvm.hotspot.types.*;
 import sun.jvm.hotspot.utilities.*;
 
@@ -80,11 +81,11 @@
     if (fp == null) {
       return null; // no information
     }
-    return new AMD64Frame(thread.getLastJavaSP(), fp);
+    return new X86Frame(thread.getLastJavaSP(), fp);
   }
 
   public    RegisterMap newRegisterMap(JavaThread thread, boolean updateMap) {
-    return new AMD64RegisterMap(thread, updateMap);
+    return new X86RegisterMap(thread, updateMap);
   }
 
   public    Frame getCurrentFrameGuess(JavaThread thread, Address addr) {
@@ -95,9 +96,9 @@
       return null;
     }
     if (guesser.getPC() == null) {
-      return new AMD64Frame(guesser.getSP(), guesser.getFP());
+      return new X86Frame(guesser.getSP(), guesser.getFP());
     } else {
-      return new AMD64Frame(guesser.getSP(), guesser.getFP(), guesser.getPC());
+      return new X86Frame(guesser.getSP(), guesser.getFP(), guesser.getPC());
     }
   }
 
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/solaris_amd64/SolarisAMD64JavaThreadPDAccess.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/solaris_amd64/SolarisAMD64JavaThreadPDAccess.java	Thu Dec 22 15:46:11 2011 +0000
@@ -30,6 +30,7 @@
 import sun.jvm.hotspot.debugger.amd64.*;
 import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.runtime.amd64.*;
+import sun.jvm.hotspot.runtime.x86.*;
 import sun.jvm.hotspot.types.*;
 import sun.jvm.hotspot.utilities.*;
 
@@ -84,14 +85,14 @@
         }
         Address pc =  thread.getLastJavaPC();
         if ( pc != null ) {
-            return new AMD64Frame(thread.getLastJavaSP(), fp, pc);
+            return new X86Frame(thread.getLastJavaSP(), fp, pc);
         } else {
-            return new AMD64Frame(thread.getLastJavaSP(), fp);
+            return new X86Frame(thread.getLastJavaSP(), fp);
         }
     }
 
     public RegisterMap newRegisterMap(JavaThread thread, boolean updateMap) {
-        return new AMD64RegisterMap(thread, updateMap);
+        return new X86RegisterMap(thread, updateMap);
     }
 
     public Frame getCurrentFrameGuess(JavaThread thread, Address addr) {
@@ -102,9 +103,9 @@
             return null;
         }
         if (guesser.getPC() == null) {
-            return new AMD64Frame(guesser.getSP(), guesser.getFP());
+            return new X86Frame(guesser.getSP(), guesser.getFP());
         } else {
-            return new AMD64Frame(guesser.getSP(), guesser.getFP(), guesser.getPC());
+            return new X86Frame(guesser.getSP(), guesser.getFP(), guesser.getPC());
         }
     }
 
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/sparc/SPARCFrame.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/sparc/SPARCFrame.java	Thu Dec 22 15:46:11 2011 +0000
@@ -236,7 +236,7 @@
       CodeBlob cb = VM.getVM().getCodeCache().findBlob(pc);
       if (cb != null && cb.isJavaMethod()) {
         NMethod nm = (NMethod) cb;
-        if (pc.equals(nm.deoptBegin())) {
+        if (pc.equals(nm.deoptHandlerBegin())) {
           // adjust pc if frame is deoptimized.
           pc = this.getUnextendedSP().getAddressAt(nm.origPCOffset());
           deoptimized = true;
@@ -559,49 +559,46 @@
       }
     }
 
-    if (!VM.getVM().isCore()) {
-      // Note:  The version of this operation on any platform with callee-save
-      //        registers must update the register map (if not null).
-      //        In order to do this correctly, the various subtypes of
-      //        of frame (interpreted, compiled, glue, native),
-      //        must be distinguished.  There is no need on SPARC for
-      //        such distinctions, because all callee-save registers are
-      //        preserved for all frames via SPARC-specific mechanisms.
-      //
-      //        *** HOWEVER, *** if and when we make any floating-point
-      //        registers callee-saved, then we will have to copy over
-      //        the RegisterMap update logic from the Intel code.
+    // Note:  The version of this operation on any platform with callee-save
+    //        registers must update the register map (if not null).
+    //        In order to do this correctly, the various subtypes of
+    //        frame (interpreted, compiled, glue, native),
+    //        must be distinguished.  There is no need on SPARC for
+    //        such distinctions, because all callee-save registers are
+    //        preserved for all frames via SPARC-specific mechanisms.
+    //
+    //        *** HOWEVER, *** if and when we make any floating-point
+    //        registers callee-saved, then we will have to copy over
+    //        the RegisterMap update logic from the Intel code.
 
+    if (isRicochetFrame()) return senderForRicochetFrame(map);
 
-      // The constructor of the sender must know whether this frame is interpreted so it can set the
-      // sender's _interpreter_sp_adjustment field.
-      if (VM.getVM().getInterpreter().contains(pc)) {
-        isInterpreted = true;
-        map.makeIntegerRegsUnsaved();
+    // The constructor of the sender must know whether this frame is interpreted so it can set the
+    // sender's _interpreter_sp_adjustment field.
+    if (VM.getVM().getInterpreter().contains(pc)) {
+      isInterpreted = true;
+      map.makeIntegerRegsUnsaved();
+      map.shiftWindow(sp, youngerSP);
+    } else {
+      // Find a CodeBlob containing this frame's pc or elide the lookup and use the
+      // supplied blob which is already known to be associated with this frame.
+      cb = VM.getVM().getCodeCache().findBlob(pc);
+      if (cb != null) {
+        // Update the location of all implicitly saved registers
+        // as the address of these registers in the register save
+        // area (for %o registers we use the address of the %i
+        // register in the next younger frame)
         map.shiftWindow(sp, youngerSP);
-      } else {
-        // Find a CodeBlob containing this frame's pc or elide the lookup and use the
-        // supplied blob which is already known to be associated with this frame.
-        cb = VM.getVM().getCodeCache().findBlob(pc);
-        if (cb != null) {
-
-          if (cb.callerMustGCArguments(map.getThread())) {
+        if (map.getUpdateMap()) {
+          if (cb.callerMustGCArguments()) {
             map.setIncludeArgumentOops(true);
           }
-
-          // Update the location of all implicitly saved registers
-          // as the address of these registers in the register save
-          // area (for %o registers we use the address of the %i
-          // register in the next younger frame)
-          map.shiftWindow(sp, youngerSP);
-          if (map.getUpdateMap()) {
-            if (cb.getOopMaps() != null) {
-              OopMapSet.updateRegisterMap(this, cb, map, VM.getVM().isDebugging());
-            }
+          if (cb.getOopMaps() != null) {
+            OopMapSet.updateRegisterMap(this, cb, map, VM.getVM().isDebugging());
           }
         }
       }
-    } // #ifndef CORE
+    }
 
     return new SPARCFrame(biasSP(sp), biasSP(youngerSP), isInterpreted);
   }
@@ -948,6 +945,20 @@
   }
 
 
+  private Frame senderForRicochetFrame(SPARCRegisterMap map) {
+    if (DEBUG) {
+      System.out.println("senderForRicochetFrame");
+    }
+    //RicochetFrame* f = RicochetFrame::from_frame(fr);
+    // Cf. is_interpreted_frame path of frame::sender
+    Address youngerSP = getSP();
+    Address sp        = getSenderSP();
+    map.makeIntegerRegsUnsaved();
+    map.shiftWindow(sp, youngerSP);
+    boolean thisFrameAdjustedStack = true;  // I5_savedSP is live in this RF
+    return new SPARCFrame(sp, youngerSP, thisFrameAdjustedStack);
+  }
+
   private Frame senderForEntryFrame(RegisterMap regMap) {
     SPARCRegisterMap map = (SPARCRegisterMap) regMap;
 
@@ -965,10 +976,8 @@
     Address lastJavaPC = jcw.getLastJavaPC();
     map.clear();
 
-    if (!VM.getVM().isCore()) {
-      map.makeIntegerRegsUnsaved();
-      map.shiftWindow(lastJavaSP, null);
-    }
+    map.makeIntegerRegsUnsaved();
+    map.shiftWindow(lastJavaSP, null);
 
     if (Assert.ASSERTS_ENABLED) {
       Assert.that(map.getIncludeArgumentOops(), "should be set by clear");
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/sparc/SPARCRicochetFrame.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.runtime.sparc;
+
+import java.util.*;
+import sun.jvm.hotspot.asm.sparc.SPARCRegister;
+import sun.jvm.hotspot.asm.sparc.SPARCRegisters;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+
+public class SPARCRicochetFrame {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private SPARCFrame frame;
+
+  private static void initialize(TypeDataBase db) {
+    // Type type = db.lookupType("MethodHandles::RicochetFrame");
+
+  }
+
+  static SPARCRicochetFrame fromFrame(SPARCFrame f) {
+    return new SPARCRicochetFrame(f);
+  }
+
+  private SPARCRicochetFrame(SPARCFrame f) {
+    frame = f;
+  }
+
+  private Address registerValue(SPARCRegister reg) {
+    return frame.getSP().addOffsetTo(reg.spOffsetInSavedWindow()).getAddressAt(0);
+  }
+
+  public Address savedArgsBase() {
+    return registerValue(SPARCRegisters.L4);
+  }
+  public Address exactSenderSP() {
+    return registerValue(SPARCRegisters.I5);
+  }
+  public Address senderLink() {
+    return frame.getSenderSP();
+  }
+  public Address senderPC() {
+    return frame.getSenderPC();
+  }
+  public Address extendedSenderSP() {
+    return savedArgsBase();
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/vmSymbols.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.runtime;
+
+import java.io.*;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.memory.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.utilities.*;
+
+
+public class vmSymbols {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static Address symbolsAddress;
+  private static int FIRST_SID;
+  private static int SID_LIMIT;
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type            = db.lookupType("vmSymbols");
+    symbolsAddress       = type.getAddressField("_symbols[0]").getStaticFieldAddress();
+    FIRST_SID            = db.lookupIntConstant("vmSymbols::FIRST_SID");
+    SID_LIMIT            = db.lookupIntConstant("vmSymbols::SID_LIMIT");
+  }
+
+  public static Symbol symbolAt(int id) {
+    if (id < FIRST_SID || id >= SID_LIMIT) throw new IndexOutOfBoundsException("bad SID " + id);
+    return Symbol.create(symbolsAddress.getAddressAt(id * VM.getVM().getAddressSize()));
+  }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/win32_amd64/Win32AMD64JavaThreadPDAccess.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/win32_amd64/Win32AMD64JavaThreadPDAccess.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,10 +27,10 @@
 import java.io.*;
 import java.util.*;
 import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.win32.*;
 import sun.jvm.hotspot.debugger.amd64.*;
 import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.runtime.amd64.*;
+import sun.jvm.hotspot.runtime.x86.*;
 import sun.jvm.hotspot.types.*;
 import sun.jvm.hotspot.utilities.*;
 
@@ -86,14 +86,14 @@
     }
     Address pc =  thread.getLastJavaPC();
     if ( pc != null ) {
-      return new AMD64Frame(thread.getLastJavaSP(), fp, pc);
+      return new X86Frame(thread.getLastJavaSP(), fp, pc);
     } else {
-      return new AMD64Frame(thread.getLastJavaSP(), fp);
+      return new X86Frame(thread.getLastJavaSP(), fp);
     }
   }
 
   public RegisterMap newRegisterMap(JavaThread thread, boolean updateMap) {
-    return new AMD64RegisterMap(thread, updateMap);
+    return new X86RegisterMap(thread, updateMap);
   }
 
   public Frame getCurrentFrameGuess(JavaThread thread, Address addr) {
@@ -104,9 +104,9 @@
       return null;
     }
     if (guesser.getPC() == null) {
-      return new AMD64Frame(guesser.getSP(), guesser.getFP());
+      return new X86Frame(guesser.getSP(), guesser.getFP());
     } else {
-      return new AMD64Frame(guesser.getSP(), guesser.getFP(), guesser.getPC());
+      return new X86Frame(guesser.getSP(), guesser.getFP(), guesser.getPC());
     }
   }
 
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/win32_x86/Win32X86JavaThreadPDAccess.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/win32_x86/Win32X86JavaThreadPDAccess.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,6 @@
 import java.io.*;
 import java.util.*;
 import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.win32.*;
 import sun.jvm.hotspot.debugger.x86.*;
 import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.runtime.x86.*;
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/x86/X86Frame.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/x86/X86Frame.java	Thu Dec 22 15:46:11 2011 +0000
@@ -25,7 +25,6 @@
 package sun.jvm.hotspot.runtime.x86;
 
 import java.util.*;
-import sun.jvm.hotspot.asm.x86.*;
 import sun.jvm.hotspot.code.*;
 import sun.jvm.hotspot.compiler.*;
 import sun.jvm.hotspot.debugger.*;
@@ -62,11 +61,13 @@
   private static       int INTERPRETER_FRAME_MONITOR_BLOCK_BOTTOM_OFFSET;
 
   // Entry frames
-  private static final int ENTRY_FRAME_CALL_WRAPPER_OFFSET   =  2;
+  private static       int ENTRY_FRAME_CALL_WRAPPER_OFFSET;
 
   // Native frames
   private static final int NATIVE_FRAME_INITIAL_PARAM_OFFSET =  2;
 
+  private static VMReg rbp;
+
   static {
     VM.registerVMInitializedObserver(new Observer() {
         public void update(Observable o, Object data) {
@@ -76,19 +77,23 @@
   }
 
   private static synchronized void initialize(TypeDataBase db) {
-    if (VM.getVM().isCore()) {
-      INTERPRETER_FRAME_CACHE_OFFSET = INTERPRETER_FRAME_METHOD_OFFSET - 1;
-    } else {
-      INTERPRETER_FRAME_MDX_OFFSET   = INTERPRETER_FRAME_METHOD_OFFSET - 1;
-      INTERPRETER_FRAME_CACHE_OFFSET = INTERPRETER_FRAME_MDX_OFFSET - 1;
-    }
+    INTERPRETER_FRAME_MDX_OFFSET                  = INTERPRETER_FRAME_METHOD_OFFSET - 1;
+    INTERPRETER_FRAME_CACHE_OFFSET                = INTERPRETER_FRAME_MDX_OFFSET - 1;
     INTERPRETER_FRAME_LOCALS_OFFSET               = INTERPRETER_FRAME_CACHE_OFFSET - 1;
     INTERPRETER_FRAME_BCX_OFFSET                  = INTERPRETER_FRAME_LOCALS_OFFSET - 1;
     INTERPRETER_FRAME_INITIAL_SP_OFFSET           = INTERPRETER_FRAME_BCX_OFFSET - 1;
     INTERPRETER_FRAME_MONITOR_BLOCK_TOP_OFFSET    = INTERPRETER_FRAME_INITIAL_SP_OFFSET;
     INTERPRETER_FRAME_MONITOR_BLOCK_BOTTOM_OFFSET = INTERPRETER_FRAME_INITIAL_SP_OFFSET;
+
+    ENTRY_FRAME_CALL_WRAPPER_OFFSET = db.lookupIntConstant("frame::entry_frame_call_wrapper_offset");
+    if (VM.getVM().getAddressSize() == 4) {
+      rbp = new VMReg(5);
+    } else {
+      rbp = new VMReg(5 << 1);
+    }
   }
 
+
   // an additional field beyond sp and pc:
   Address raw_fp; // frame pointer
   private Address raw_unextendedSP;
@@ -102,7 +107,7 @@
       CodeBlob cb = VM.getVM().getCodeCache().findBlob(pc);
       if (cb != null && cb.isJavaMethod()) {
         NMethod nm = (NMethod) cb;
-        if (pc.equals(nm.deoptBegin())) {
+        if (pc.equals(nm.deoptHandlerBegin())) {
           if (Assert.ASSERTS_ENABLED) {
             Assert.that(this.getUnextendedSP() != null, "null SP in Java frame");
           }
@@ -119,6 +124,7 @@
     this.raw_unextendedSP = raw_sp;
     this.raw_fp = raw_fp;
     this.pc = pc;
+    adjustUnextendedSP();
 
     // Frame must be fully constructed before this call
     adjustForDeopt();
@@ -134,6 +140,7 @@
     this.raw_unextendedSP = raw_sp;
     this.raw_fp = raw_fp;
     this.pc = raw_sp.getAddressAt(-1 * VM.getVM().getAddressSize());
+    adjustUnextendedSP();
 
     // Frame must be fully constructed before this call
     adjustForDeopt();
@@ -144,24 +151,18 @@
     }
   }
 
-  // This constructor should really take the unextended SP as an arg
-  // but then the constructor is ambiguous with constructor that takes
-  // a PC so take an int and convert it.
-  public X86Frame(Address raw_sp, Address raw_fp, long extension) {
+  public X86Frame(Address raw_sp, Address raw_unextendedSp, Address raw_fp, Address pc) {
     this.raw_sp = raw_sp;
-    if (raw_sp == null) {
-      this.raw_unextendedSP = null;
-    } else {
-      this.raw_unextendedSP = raw_sp.addOffsetTo(extension);
-    }
+    this.raw_unextendedSP = raw_unextendedSp;
     this.raw_fp = raw_fp;
-    this.pc = raw_sp.getAddressAt(-1 * VM.getVM().getAddressSize());
+    this.pc = pc;
+    adjustUnextendedSP();
 
     // Frame must be fully constructed before this call
     adjustForDeopt();
 
     if (DEBUG) {
-      System.out.println("X86Frame(sp, fp): " + this);
+      System.out.println("X86Frame(sp, unextendedSP, fp, pc): " + this);
       dumpStack();
     }
 
@@ -172,7 +173,6 @@
     frame.raw_sp = raw_sp;
     frame.raw_unextendedSP = raw_unextendedSP;
     frame.raw_fp = raw_fp;
-    frame.raw_fp = raw_fp;
     frame.pc = pc;
     frame.deoptimized = deoptimized;
     return frame;
@@ -269,19 +269,18 @@
 
     if (isEntryFrame())       return senderForEntryFrame(map);
     if (isInterpretedFrame()) return senderForInterpreterFrame(map);
+    if (isRicochetFrame())    return senderForRicochetFrame(map);
 
-    if (!VM.getVM().isCore()) {
-      if(cb == null) {
-        cb = VM.getVM().getCodeCache().findBlob(getPC());
-      } else {
-        if (Assert.ASSERTS_ENABLED) {
-          Assert.that(cb.equals(VM.getVM().getCodeCache().findBlob(getPC())), "Must be the same");
-        }
+    if(cb == null) {
+      cb = VM.getVM().getCodeCache().findBlob(getPC());
+    } else {
+      if (Assert.ASSERTS_ENABLED) {
+        Assert.that(cb.equals(VM.getVM().getCodeCache().findBlob(getPC())), "Must be the same");
       }
+    }
 
-      if (cb != null) {
-         return senderForCompiledFrame(map, cb);
-      }
+    if (cb != null) {
+      return senderForCompiledFrame(map, cb);
     }
 
     // Must be native-compiled frame, i.e. the marshaling code for native
@@ -289,7 +288,20 @@
     return new X86Frame(getSenderSP(), getLink(), getSenderPC());
   }
 
+  private Frame senderForRicochetFrame(X86RegisterMap map) {
+    if (DEBUG) {
+      System.out.println("senderForRicochetFrame");
+    }
+    X86RicochetFrame f = X86RicochetFrame.fromFrame(this);
+    if (map.getUpdateMap())
+      updateMapWithSavedLink(map, f.senderLinkAddress());
+    return new X86Frame(f.extendedSenderSP(), f.exactSenderSP(), f.senderLink(), f.senderPC());
+  }
+
   private Frame senderForEntryFrame(X86RegisterMap map) {
+    if (DEBUG) {
+      System.out.println("senderForEntryFrame");
+    }
     if (Assert.ASSERTS_ENABLED) {
       Assert.that(map != null, "map must be set");
     }
@@ -313,7 +325,37 @@
     return fr;
   }
 
+  //------------------------------------------------------------------------------
+  // frame::adjust_unextended_sp
+  private void adjustUnextendedSP() {
+    // If we are returning to a compiled MethodHandle call site, the
+    // saved_fp will in fact be a saved value of the unextended SP.  The
+    // simplest way to tell whether we are returning to such a call site
+    // is as follows:
+
+    CodeBlob cb = cb();
+    NMethod senderNm = (cb == null) ? null : cb.asNMethodOrNull();
+    if (senderNm != null) {
+      // If the sender PC is a deoptimization point, get the original
+      // PC.  For a MethodHandle call site the unextended_sp is stored in
+      // saved_fp.
+      if (senderNm.isDeoptMhEntry(getPC())) {
+        // DEBUG_ONLY(verifyDeoptMhOriginalPc(senderNm, getFP()));
+        raw_unextendedSP = getFP();
+      }
+      else if (senderNm.isDeoptEntry(getPC())) {
+        // DEBUG_ONLY(verifyDeoptOriginalPc(senderNm, raw_unextendedSp));
+      }
+      else if (senderNm.isMethodHandleReturn(getPC())) {
+        raw_unextendedSP = getFP();
+      }
+    }
+  }
+
   private Frame senderForInterpreterFrame(X86RegisterMap map) {
+    if (DEBUG) {
+      System.out.println("senderForInterpreterFrame");
+    }
     Address unextendedSP = addressOfStackSlot(INTERPRETER_FRAME_SENDER_SP_OFFSET).getAddressAt(0);
     Address sp = addressOfStackSlot(SENDER_SP_OFFSET);
     // We do not need to update the callee-save register mapping because above
@@ -323,10 +365,21 @@
     // However c2 no longer uses callee save register for java calls so there
     // are no callee register to find.
 
-    return new X86Frame(sp, getLink(), unextendedSP.minus(sp));
+    if (map.getUpdateMap())
+      updateMapWithSavedLink(map, addressOfStackSlot(LINK_OFFSET));
+
+    return new X86Frame(sp, unextendedSP, getLink(), getSenderPC());
+  }
+
+  private void updateMapWithSavedLink(RegisterMap map, Address savedFPAddr) {
+    map.setLocation(rbp, savedFPAddr);
   }
 
   private Frame senderForCompiledFrame(X86RegisterMap map, CodeBlob cb) {
+    if (DEBUG) {
+      System.out.println("senderForCompiledFrame");
+    }
+
     //
     // NOTE: some of this code is (unfortunately) duplicated in X86CurrentFrameGuess
     //
@@ -336,41 +389,35 @@
     }
 
     // frame owned by optimizing compiler
-    Address        sender_sp = null;
-
-    if (VM.getVM().isClientCompiler()) {
-      sender_sp        = addressOfStackSlot(SENDER_SP_OFFSET);
-    } else {
-      if (Assert.ASSERTS_ENABLED) {
-        Assert.that(cb.getFrameSize() >= 0, "Compiled by Compiler1: do not use");
-      }
-      sender_sp = getUnextendedSP().addOffsetTo(cb.getFrameSize());
+    if (Assert.ASSERTS_ENABLED) {
+      Assert.that(cb.getFrameSize() >= 0, "must have non-zero frame size");
     }
+    Address senderSP = getUnextendedSP().addOffsetTo(cb.getFrameSize());
 
     // On Intel the return_address is always the word on the stack
-    Address sender_pc = sender_sp.getAddressAt(-1 * VM.getVM().getAddressSize());
+    Address senderPC = senderSP.getAddressAt(-1 * VM.getVM().getAddressSize());
+
+    // This is the saved value of EBP which may or may not really be an FP.
+    // It is only an FP if the sender is an interpreter frame (or C1?).
+    Address savedFPAddr = senderSP.addOffsetTo(- SENDER_SP_OFFSET * VM.getVM().getAddressSize());
 
-    if (map.getUpdateMap() && cb.getOopMaps() != null) {
-      OopMapSet.updateRegisterMap(this, cb, map, true);
+    if (map.getUpdateMap()) {
+      // Tell GC to use argument oopmaps for some runtime stubs that need it.
+      // For C1, the runtime stub might not have oop maps, so set this flag
+      // outside of update_register_map.
+      map.setIncludeArgumentOops(cb.callerMustGCArguments());
+
+      if (cb.getOopMaps() != null) {
+        OopMapSet.updateRegisterMap(this, cb, map, true);
+      }
+
+      // Since the prolog does the save and restore of EBP there is no oopmap
+      // for it, so we must fill in its location as if there were an oopmap entry,
+      // because if our caller was compiled code there could be live jvm state in it.
+      updateMapWithSavedLink(map, savedFPAddr);
     }
 
-    if (VM.getVM().isClientCompiler()) {
-      // Move this here for C1 and collecting oops in arguments (According to Rene)
-      map.setIncludeArgumentOops(cb.callerMustGCArguments(map.getThread()));
-    }
-
-    Address saved_fp = null;
-    if (VM.getVM().isClientCompiler()) {
-      saved_fp = getFP().getAddressAt(0);
-    } else if (VM.getVM().isServerCompiler() &&
-               (VM.getVM().getInterpreter().contains(sender_pc) ||
-               VM.getVM().getStubRoutines().returnsToCallStub(sender_pc))) {
-      // C2 prologue saves EBP in the usual place.
-      // however only use it if the sender had link infomration in it.
-      saved_fp = sender_sp.getAddressAt(-2 * VM.getVM().getAddressSize());
-    }
-
-    return new X86Frame(sender_sp, saved_fp, sender_pc);
+    return new X86Frame(senderSP, savedFPAddr.getAddressAt(0), senderPC);
   }
 
   protected boolean hasSenderPD() {
@@ -403,14 +450,6 @@
 
   public Address getSenderSP()     { return addressOfStackSlot(SENDER_SP_OFFSET); }
 
-  public Address compiledArgumentToLocationPD(VMReg reg, RegisterMap regMap, int argSize) {
-    if (VM.getVM().isCore() || VM.getVM().isClientCompiler()) {
-      throw new RuntimeException("Should not reach here");
-    }
-
-    return oopMapRegToLocation(reg, regMap);
-  }
-
   public Address addressOfInterpreterFrameLocals() {
     return addressOfStackSlot(INTERPRETER_FRAME_LOCALS_OFFSET);
   }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/x86/X86RicochetFrame.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.runtime.x86;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+
+public class X86RicochetFrame extends VMObject {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static void initialize(TypeDataBase db) {
+    Type type = db.lookupType("MethodHandles::RicochetFrame");
+
+    senderLinkField    = type.getAddressField("_sender_link");
+    savedArgsBaseField = type.getAddressField("_saved_args_base");
+    exactSenderSPField = type.getAddressField("_exact_sender_sp");
+    senderPCField      = type.getAddressField("_sender_pc");
+  }
+
+  private static AddressField senderLinkField;
+  private static AddressField savedArgsBaseField;
+  private static AddressField exactSenderSPField;
+  private static AddressField senderPCField;
+
+  static X86RicochetFrame fromFrame(X86Frame f) {
+    return new X86RicochetFrame(f.getFP().addOffsetTo(- senderLinkField.getOffset()));
+  }
+
+  private X86RicochetFrame(Address addr) {
+    super(addr);
+  }
+
+  public Address senderLink() {
+    return senderLinkField.getValue(addr);
+  }
+  public Address senderLinkAddress() {
+    return addr.addOffsetTo(senderLinkField.getOffset());
+  }
+  public Address savedArgsBase() {
+    return savedArgsBaseField.getValue(addr);
+  }
+  public Address extendedSenderSP() {
+    return savedArgsBase();
+  }
+  public Address exactSenderSP() {
+    return exactSenderSPField.getValue(addr);
+  }
+  public Address senderPC() {
+    return senderPCField.getValue(addr);
+  }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,11 +26,11 @@
 
 import java.util.*;
 import sun.jvm.hotspot.gc_interface.*;
+import sun.jvm.hotspot.gc_implementation.g1.*;
 import sun.jvm.hotspot.gc_implementation.parallelScavenge.*;
 import sun.jvm.hotspot.gc_implementation.shared.*;
 import sun.jvm.hotspot.memory.*;
 import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.tools.*;
 
 public class HeapSummary extends Tool {
 
@@ -70,32 +70,45 @@
       System.out.println();
       System.out.println("Heap Usage:");
 
-      if (heap instanceof GenCollectedHeap) {
-         GenCollectedHeap genHeap = (GenCollectedHeap) heap;
-         for (int n = 0; n < genHeap.nGens(); n++) {
-            Generation gen = genHeap.getGen(n);
-            if (gen instanceof sun.jvm.hotspot.memory.DefNewGeneration) {
-               System.out.println("New Generation (Eden + 1 Survivor Space):");
-               printGen(gen);
+      if (heap instanceof SharedHeap) {
+         SharedHeap sharedHeap = (SharedHeap) heap;
+         if (sharedHeap instanceof GenCollectedHeap) {
+            GenCollectedHeap genHeap = (GenCollectedHeap) sharedHeap;
+            for (int n = 0; n < genHeap.nGens(); n++) {
+               Generation gen = genHeap.getGen(n);
+               if (gen instanceof sun.jvm.hotspot.memory.DefNewGeneration) {
+                  System.out.println("New Generation (Eden + 1 Survivor Space):");
+                  printGen(gen);
 
-               ContiguousSpace eden = ((DefNewGeneration)gen).eden();
-               System.out.println("Eden Space:");
-               printSpace(eden);
+                  ContiguousSpace eden = ((DefNewGeneration)gen).eden();
+                  System.out.println("Eden Space:");
+                  printSpace(eden);
+
+                  ContiguousSpace from = ((DefNewGeneration)gen).from();
+                  System.out.println("From Space:");
+                  printSpace(from);
 
-               ContiguousSpace from = ((DefNewGeneration)gen).from();
-               System.out.println("From Space:");
-               printSpace(from);
-
-               ContiguousSpace to = ((DefNewGeneration)gen).to();
-               System.out.println("To Space:");
-               printSpace(to);
-            } else {
-               System.out.println(gen.name() + ":");
-               printGen(gen);
+                  ContiguousSpace to = ((DefNewGeneration)gen).to();
+                  System.out.println("To Space:");
+                  printSpace(to);
+               } else {
+                  System.out.println(gen.name() + ":");
+                  printGen(gen);
+               }
             }
+         } else if (sharedHeap instanceof G1CollectedHeap) {
+             G1CollectedHeap g1h = (G1CollectedHeap) sharedHeap;
+             G1MonitoringSupport g1mm = g1h.g1mm();
+             System.out.println("G1 Young Generation");
+             printG1Space("Eden Space:", g1mm.edenUsed(), g1mm.edenCommitted());
+             printG1Space("From Space:", g1mm.survivorUsed(), g1mm.survivorCommitted());
+             printG1Space("To Space:", 0, 0);
+             printG1Space("G1 Old Generation", g1mm.oldUsed(), g1mm.oldCommitted());
+         } else {
+             throw new RuntimeException("unknown SharedHeap type : " + heap.getClass());
          }
-         // Perm generation
-         Generation permGen = genHeap.permGen();
+         // Perm generation shared by the above
+         Generation permGen = sharedHeap.permGen();
          System.out.println("Perm Generation:");
          printGen(permGen);
       } else if (heap instanceof ParallelScavengeHeap) {
@@ -119,7 +132,7 @@
          printValMB("free     = ", permFree);
          System.out.println(alignment + (double)permGen.used() * 100.0 / permGen.capacity() + "% used");
       } else {
-         throw new RuntimeException("unknown heap type : " + heap.getClass());
+         throw new RuntimeException("unknown CollectedHeap type : " + heap.getClass());
       }
    }
 
@@ -151,6 +164,14 @@
           return;
        }
 
+       l = getFlagValue("UseG1GC", flagMap);
+       if (l == 1L) {
+           System.out.print("Garbage-First (G1) GC ");
+           l = getFlagValue("ParallelGCThreads", flagMap);
+           System.out.println("with " + l + " thread(s)");
+           return;
+       }
+
        System.out.println("Mark Sweep Compact GC");
    }
 
@@ -191,6 +212,16 @@
       System.out.println(alignment +  (double)space.used() * 100.0 / space.capacity() + "% used");
    }
 
+   private void printG1Space(String spaceName, long used, long capacity) {
+      long free = capacity - used;
+      System.out.println(spaceName);
+      printValMB("capacity = ", capacity);
+      printValMB("used     = ", used);
+      printValMB("free     = ", free);
+      double occPerc = (capacity > 0) ? (double) used * 100.0 / capacity : 0.0;
+      System.out.println(alignment + occPerc + "% used");
+   }
+
    private static final double FACTOR = 1024*1024;
    private void printValMB(String title, long value) {
       if (value < 0) {
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassDump.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassDump.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,9 @@
 package sun.jvm.hotspot.tools.jcore;
 
 import java.io.*;
+import java.util.jar.JarOutputStream;
+import java.util.jar.JarEntry;
+import java.util.jar.Manifest;
 import sun.jvm.hotspot.memory.*;
 import sun.jvm.hotspot.oops.*;
 import sun.jvm.hotspot.debugger.*;
@@ -34,27 +37,33 @@
 public class ClassDump extends Tool {
     private ClassFilter classFilter;
     private String      outputDirectory;
+    private JarOutputStream jarStream;
+
+    public void setClassFilter(ClassFilter cf) {
+        classFilter = cf;
+    }
+
+    public void setOutputDirectory(String od) {
+        outputDirectory = od;
+        if (jarStream != null) {
+            try {
+                jarStream.close();
+            } catch (IOException ioe) {
+                ioe.printStackTrace();
+            }
+        }
+        jarStream = null;
+    }
+
+    public void setJarOutput(String jarFileName) throws IOException {
+        jarStream = new JarOutputStream(new FileOutputStream(jarFileName), new Manifest());
+        outputDirectory = null;
+    }
 
     public void run() {
         // Ready to go with the database...
         try {
 
-            // load class filters
-
-            String filterClassName = System.getProperty("sun.jvm.hotspot.tools.jcore.filter");
-            if (filterClassName != null) {
-                try {
-                    Class filterClass = Class.forName(filterClassName);
-                    classFilter = (ClassFilter) filterClass.newInstance();
-                } catch(Exception exp) {
-                    System.err.println("Warning: Can not create class filter!");
-                }
-            }
-
-            outputDirectory = System.getProperty("sun.jvm.hotspot.tools.jcore.outputDir");
-            if (outputDirectory == null)
-                outputDirectory = ".";
-
             // walk through the system dictionary
             SystemDictionary dict = VM.getVM().getSystemDictionary();
             dict.classesDo(new SystemDictionary.ClassVisitor() {
@@ -75,6 +84,14 @@
                                + Long.toHexString(e.getAddress()));
             e.printStackTrace();
         }
+        if (jarStream != null) {
+            try {
+                jarStream.close();
+            } catch (IOException ioe) {
+                ioe.printStackTrace();
+            }
+            jarStream = null;
+        }
     }
 
     public String getName() {
@@ -88,26 +105,33 @@
 
         String klassName = kls.getName().asString();
         klassName = klassName.replace('/', File.separatorChar);
-        int index = klassName.lastIndexOf(File.separatorChar);
-        File dir = null;
-        if (index != -1) {
-            String dirName = klassName.substring(0, index);
-            dir =  new File(outputDirectory,  dirName);
-        } else {
-            dir = new File(outputDirectory);
-        }
+        try {
+            OutputStream os = null;
+            if (jarStream != null) {
+                jarStream.putNextEntry(new JarEntry(klassName + ".class"));
+                os = jarStream;
+            } else {
+                int index = klassName.lastIndexOf(File.separatorChar);
+                File dir = null;
+                if (index != -1) {
+                    String dirName = klassName.substring(0, index);
+                    dir = new File(outputDirectory,  dirName);
+                } else {
+                    dir = new File(outputDirectory);
+                }
 
-        dir.mkdirs();
-        File f = new File(dir, klassName.substring(klassName.lastIndexOf(File.separatorChar) + 1)
-                          + ".class");
-        try {
-            f.createNewFile();
-            OutputStream os = new BufferedOutputStream(new FileOutputStream(f));
+                dir.mkdirs();
+                File f = new File(dir, klassName.substring(index + 1) + ".class");
+                f.createNewFile();
+                os = new BufferedOutputStream(new FileOutputStream(f));
+            }
             try {
                 ClassWriter cw = new ClassWriter(kls, os);
                 cw.write();
             } finally {
-                os.close();
+                if (os != jarStream) {
+                    os.close();
+                }
             }
         } catch(IOException exp) {
             exp.printStackTrace();
@@ -115,7 +139,26 @@
     }
 
     public static void main(String[] args) {
+        // load class filters
+        ClassFilter classFilter = null;
+        String filterClassName = System.getProperty("sun.jvm.hotspot.tools.jcore.filter");
+        if (filterClassName != null) {
+            try {
+                Class filterClass = Class.forName(filterClassName);
+                classFilter = (ClassFilter) filterClass.newInstance();
+            } catch(Exception exp) {
+                System.err.println("Warning: Cannot create class filter!");
+            }
+        }
+
+        String outputDirectory = System.getProperty("sun.jvm.hotspot.tools.jcore.outputDir");
+        if (outputDirectory == null)
+            outputDirectory = ".";
+
+
         ClassDump cd = new ClassDump();
+        cd.setClassFilter(classFilter);
+        cd.setOutputDirectory(outputDirectory);
         cd.start(args);
         cd.stop();
     }
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java	Thu Dec 22 15:46:11 2011 +0000
@@ -379,23 +379,21 @@
     }
 
     protected void writeFields() throws IOException {
-        TypeArray fields = klass.getFields();
-        final int length = (int) fields.getLength();
+        final int length = klass.getJavaFieldsCount();
 
         // write number of fields
-        dos.writeShort((short) (length / InstanceKlass.NEXT_OFFSET) );
+        dos.writeShort((short) length);
 
-        if (DEBUG) debugMessage("number of fields = "
-                                + length/InstanceKlass.NEXT_OFFSET);
+        if (DEBUG) debugMessage("number of fields = " + length);
 
-        for (int index = 0; index < length; index += InstanceKlass.NEXT_OFFSET) {
-            short accessFlags    = fields.getShortAt(index + InstanceKlass.ACCESS_FLAGS_OFFSET);
+        for (int index = 0; index < length; index++) {
+            short accessFlags    = klass.getFieldAccessFlags(index);
             dos.writeShort(accessFlags & (short) JVM_RECOGNIZED_FIELD_MODIFIERS);
 
-            short nameIndex    = fields.getShortAt(index + InstanceKlass.NAME_INDEX_OFFSET);
+            short nameIndex    = klass.getFieldNameIndex(index);
             dos.writeShort(nameIndex);
 
-            short signatureIndex = fields.getShortAt(index + InstanceKlass.SIGNATURE_INDEX_OFFSET);
+            short signatureIndex = klass.getFieldSignatureIndex(index);
             dos.writeShort(signatureIndex);
             if (DEBUG) debugMessage("\tfield name = " + nameIndex + ", signature = " + signatureIndex);
 
@@ -404,11 +402,11 @@
             if (hasSyn)
                 fieldAttributeCount++;
 
-            short initvalIndex = fields.getShortAt(index + InstanceKlass.INITVAL_INDEX_OFFSET);
+            short initvalIndex = klass.getFieldInitialValueIndex(index);
             if (initvalIndex != 0)
                 fieldAttributeCount++;
 
-            short genSigIndex = fields.getShortAt(index + InstanceKlass.GENERIC_SIGNATURE_INDEX_OFFSET);
+            short genSigIndex = klass.getFieldGenericSignatureIndex(index);
             if (genSigIndex != 0)
                 fieldAttributeCount++;
 
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/soql/SOQL.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/soql/SOQL.java	Thu Dec 22 15:46:11 2011 +0000
@@ -150,16 +150,13 @@
             }
 
             // list immediate fields only
-            TypeArray fields = klass.getFields();
-            int numFields = (int) fields.getLength();
+            int numFields = klass.getJavaFieldsCount();
             ConstantPool cp = klass.getConstants();
             out.println("fields");
             if (numFields != 0) {
-               for (int f = 0; f < numFields; f += InstanceKlass.NEXT_OFFSET) {
-                 int nameIndex = fields.getShortAt(f + InstanceKlass.NAME_INDEX_OFFSET);
-                 int sigIndex  = fields.getShortAt(f + InstanceKlass.SIGNATURE_INDEX_OFFSET);
-                 Symbol f_name = cp.getSymbolAt(nameIndex);
-                 Symbol f_sig  = cp.getSymbolAt(sigIndex);
+              for (int f = 0; f < numFields; f++){
+                 Symbol f_name = klass.getFieldName(f);
+                 Symbol f_sig  = klass.getFieldSignature(f);
                  StringBuffer sigBuf = new StringBuffer();
                  new SignatureConverter(f_sig, sigBuf).dispatchField();
                  out.print('\t');
--- a/agent/src/share/classes/sun/jvm/hotspot/types/TypeDataBase.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/types/TypeDataBase.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -120,6 +120,11 @@
       found. */
   public Type guessTypeForAddress(Address addr);
 
+  /** Helper routine for guessing the most derived type of a
+      polymorphic C++ object. Requires a baseType that is polymorphic
+      (i.e. has a vtbl) so that lookup can be performed without false positives */
+  public Type findDynamicTypeForAddress(Address addr, Type baseType);
+
   /** Returns an Iterator over the Types in the database. */
   public Iterator getTypes();
 
--- a/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicTypeDataBase.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicTypeDataBase.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -150,6 +150,19 @@
     return VM.getVM().getOopSize();
   }
 
+  static HashMap typeToVtbl = new HashMap();
+
+  private Address vtblForType(Type type) {
+    Address vtblAddr = (Address)typeToVtbl.get(type);
+    if (vtblAddr == null) {
+      vtblAddr = vtblAccess.getVtblForType(type);
+      if (vtblAddr != null) {
+        typeToVtbl.put(type, vtblAddr);
+      }
+    }
+    return vtblAddr;
+  }
+
   public boolean addressTypeIsEqualToType(Address addr, Type type) {
     if (addr == null) {
       return false;
@@ -158,7 +171,7 @@
     // This implementation should be suitably platform-independent; we
     // search nearby memory for the vtbl value of the given type.
 
-    Address vtblAddr = vtblAccess.getVtblForType(type);
+    Address vtblAddr = vtblForType(type);
 
     if (vtblAddr == null) {
       // Type was not polymorphic, or an error occurred during lookup
@@ -251,6 +264,78 @@
     return false;
   }
 
+  public Type findDynamicTypeForAddress(Address addr, Type baseType) {
+    // This implementation should be suitably platform-independent; we
+    // search nearby memory for the vtbl value of the given type.
+
+    if (vtblForType(baseType) == null) {
+      // Type was not polymorphic which is an error of some sort
+      throw new InternalError(baseType + " does not appear to be polymorphic");
+    }
+
+    // This is a more restricted version of guessTypeForAddress, which
+    // has some limitations: it doesn't really know where in the
+    // hierarchy a virtual type starts, and just poking
+    // around in memory is likely to trip over some vtable address,
+    // resulting in false positives.  Eventually all uses should
+    // switch to this logic but in the interests of stability it will
+    // be separate for the moment.
+
+    // Assuming that the base type is truly the first polymorphic type
+    // then the vtbl for all subclasses should be at several defined
+    // locations so only those locations will be checked.  It's also
+    // required that the caller knows that the static type is at least
+    // baseType.  See the notes in guessTypeForAddress for the logic of
+    // the locations searched.
+
+    Address loc1 = addr.getAddressAt(0);
+    Address loc2 = null;
+    Address loc3 = null;
+    long offset2 = baseType.getSize();
+    // I don't think this should be misaligned under any
+    // circumstances, but I'm not sure (FIXME: also not sure which
+    // way to go here, up or down -- assuming down)
+    offset2 = offset2 - (offset2 % getAddressSize()) - getAddressSize();
+    if (offset2 > 0) {
+      loc2 = addr.getAddressAt(offset2);
+    }
+    long offset3 = offset2 - getAddressSize();
+    if (offset3 > 0) {
+      loc3 = addr.getAddressAt(offset3);
+    }
+
+    Type loc2Match = null;
+    Type loc3Match = null;
+    for (Iterator iter = getTypes(); iter.hasNext(); ) {
+      Type type = (Type) iter.next();
+      Type superClass = type;
+      while (superClass != baseType && superClass != null) {
+        superClass = superClass.getSuperclass();
+      }
+      if (superClass == null) continue;
+      Address vtblAddr = vtblForType(type);
+      if (vtblAddr == null) {
+        // This occurs sometimes for intermediate types that are never
+        // instantiated.
+        if (DEBUG) {
+          System.err.println("null vtbl for " + type);
+        }
+        continue;
+      }
+      // Prefer loc1 match
+      if (vtblAddr.equals(loc1)) return type;
+      if (loc2 != null && loc2Match == null && vtblAddr.equals(loc2)) {
+          loc2Match = type;
+      }
+      if (loc3 != null && loc3Match == null && vtblAddr.equals(loc3)) {
+          loc3Match = type;
+      }
+    }
+    if (loc2Match != null) return loc2Match;
+    if (loc3Match != null) return loc3Match;
+    return null;
+  }
+
   public Type guessTypeForAddress(Address addr) {
     for (Iterator iter = getTypes(); iter.hasNext(); ) {
       Type t = (Type) iter.next();
--- a/agent/src/share/classes/sun/jvm/hotspot/ui/CommandProcessorPanel.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/ui/CommandProcessorPanel.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -88,7 +88,7 @@
                                     public void run() {
                                         beginUpdate();
                                         try {
-                                            commands.executeCommand(ln);
+                                            commands.executeCommand(ln, true);
                                             commands.printPrompt();
                                             Document d = editor.getDocument();
                                             try {
@@ -149,7 +149,7 @@
     public void clear() {
         EditableAtEndDocument d = (EditableAtEndDocument) editor.getDocument();
         d.clear();
-        commands.executeCommand("");
+        commands.executeCommand("", false);
         setMark();
         editor.requestFocus();
     }
--- a/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1116,20 +1116,15 @@
                 InstanceKlass kls = (InstanceKlass) obj;
                 buf.append(" " + kls.getName().asString() + "={");
                 int flen = ov.fieldsSize();
-
-                TypeArray klfields = kls.getFields();
-                int klen = (int) klfields.getLength();
-
-                ConstantPool cp = kls.getConstants();
+                int klen = kls.getJavaFieldsCount();
                 int findex = 0;
-                for (int index = 0; index < klen; index += kls.NEXT_OFFSET) {
-                    int accsFlags = klfields.getShortAt(index + kls.ACCESS_FLAGS_OFFSET);
-                    int nameIndex = klfields.getShortAt(index + kls.NAME_INDEX_OFFSET);
+                for (int index = 0; index < klen; index++) {
+                    int accsFlags = kls.getFieldAccessFlags(index);
+                    Symbol f_name = kls.getFieldName(index);
                     AccessFlags access = new AccessFlags(accsFlags);
                     if (!access.isStatic()) {
                         ScopeValue svf = ov.getFieldAt(findex++);
                         String    fstr = scopeValueAsString(sd, svf);
-                        Symbol f_name  = cp.getSymbolAt(nameIndex);
                         buf.append(" [" + f_name.asString() + " :"+ index + "]=(#" + fstr + ")");
                     }
                 }
@@ -1819,13 +1814,11 @@
 
    protected String genHTMLListForFields(InstanceKlass klass) {
       Formatter buf = new Formatter(genHTML);
-      TypeArray fields = klass.getFields();
-      int numFields = (int) fields.getLength();
-      ConstantPool cp = klass.getConstants();
+      int numFields = klass.getJavaFieldsCount();
       if (numFields != 0) {
          buf.h3("Fields");
          buf.beginList();
-         for (int f = 0; f < numFields; f += InstanceKlass.NEXT_OFFSET) {
+         for (int f = 0; f < numFields; f++) {
            sun.jvm.hotspot.oops.Field field = klass.getFieldByIndex(f);
            String f_name = ((NamedFieldIdentifier)field.getID()).getName();
            Symbol f_sig  = field.getSignature();
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/ConstantTag.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/ConstantTag.java	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,31 +24,33 @@
 
 package sun.jvm.hotspot.utilities;
 
+import sun.jvm.hotspot.runtime.BasicType;
+
 public class ConstantTag {
   // These replicated from the VM to save space
-  private static int JVM_CONSTANT_Utf8                    = 1;
-  private static int JVM_CONSTANT_Unicode                 = 2; // unused
-  private static int JVM_CONSTANT_Integer                 = 3;
-  private static int JVM_CONSTANT_Float                   = 4;
-  private static int JVM_CONSTANT_Long                    = 5;
-  private static int JVM_CONSTANT_Double                  = 6;
-  private static int JVM_CONSTANT_Class                   = 7;
-  private static int JVM_CONSTANT_String                  = 8;
-  private static int JVM_CONSTANT_Fieldref                = 9;
-  private static int JVM_CONSTANT_Methodref               = 10;
-  private static int JVM_CONSTANT_InterfaceMethodref      = 11;
-  private static int JVM_CONSTANT_NameAndType             = 12;
-  private static int JVM_CONSTANT_MethodHandle            = 15;  // JSR 292
-  private static int JVM_CONSTANT_MethodType              = 16;  // JSR 292
-  //      static int JVM_CONSTANT_(unused)                = 17;  // JSR 292 early drafts only
-  private static int JVM_CONSTANT_InvokeDynamic           = 18;  // JSR 292
-  private static int JVM_CONSTANT_Invalid                 = 0;   // For bad value initialization
-  private static int JVM_CONSTANT_UnresolvedClass         = 100; // Temporary tag until actual use
-  private static int JVM_CONSTANT_ClassIndex              = 101; // Temporary tag while constructing constant pool
-  private static int JVM_CONSTANT_UnresolvedString        = 102; // Temporary tag until actual use
-  private static int JVM_CONSTANT_StringIndex             = 103; // Temporary tag while constructing constant pool
-  private static int JVM_CONSTANT_UnresolvedClassInError  = 104; // Resolution failed
-  private static int JVM_CONSTANT_Object                  = 105; // Required for BoundMethodHandle arguments.
+  private static final int JVM_CONSTANT_Utf8                    = 1;
+  private static final int JVM_CONSTANT_Unicode                 = 2; // unused
+  private static final int JVM_CONSTANT_Integer                 = 3;
+  private static final int JVM_CONSTANT_Float                   = 4;
+  private static final int JVM_CONSTANT_Long                    = 5;
+  private static final int JVM_CONSTANT_Double                  = 6;
+  private static final int JVM_CONSTANT_Class                   = 7;
+  private static final int JVM_CONSTANT_String                  = 8;
+  private static final int JVM_CONSTANT_Fieldref                = 9;
+  private static final int JVM_CONSTANT_Methodref               = 10;
+  private static final int JVM_CONSTANT_InterfaceMethodref      = 11;
+  private static final int JVM_CONSTANT_NameAndType             = 12;
+  private static final int JVM_CONSTANT_MethodHandle            = 15;  // JSR 292
+  private static final int JVM_CONSTANT_MethodType              = 16;  // JSR 292
+  //      static final int JVM_CONSTANT_(unused)                = 17;  // JSR 292 early drafts only
+  private static final int JVM_CONSTANT_InvokeDynamic           = 18;  // JSR 292
+  private static final int JVM_CONSTANT_Invalid                 = 0;   // For bad value initialization
+  private static final int JVM_CONSTANT_UnresolvedClass         = 100; // Temporary tag until actual use
+  private static final int JVM_CONSTANT_ClassIndex              = 101; // Temporary tag while constructing constant pool
+  private static final int JVM_CONSTANT_UnresolvedString        = 102; // Temporary tag until actual use
+  private static final int JVM_CONSTANT_StringIndex             = 103; // Temporary tag while constructing constant pool
+  private static final int JVM_CONSTANT_UnresolvedClassInError  = 104; // Resolution failed
+  private static final int JVM_CONSTANT_Object                  = 105; // Required for BoundMethodHandle arguments.
 
   // JVM_CONSTANT_MethodHandle subtypes //FIXME: connect these to data structure
   private static int JVM_REF_getField                = 1;
@@ -99,4 +101,31 @@
   public boolean isKlassReference()   { return isKlassIndex() || isUnresolvedKlass(); }
   public boolean isFieldOrMethod()    { return isField() || isMethod() || isInterfaceMethod(); }
   public boolean isSymbol()           { return isUtf8(); }
+
+  public BasicType basicType() {
+    switch (tag) {
+    case JVM_CONSTANT_Integer :
+      return BasicType.T_INT;
+    case JVM_CONSTANT_Float :
+      return BasicType.T_FLOAT;
+    case JVM_CONSTANT_Long :
+      return BasicType.T_LONG;
+    case JVM_CONSTANT_Double :
+      return BasicType.T_DOUBLE;
+
+    case JVM_CONSTANT_Class :
+    case JVM_CONSTANT_String :
+    case JVM_CONSTANT_UnresolvedClass :
+    case JVM_CONSTANT_UnresolvedClassInError :
+    case JVM_CONSTANT_ClassIndex :
+    case JVM_CONSTANT_UnresolvedString :
+    case JVM_CONSTANT_StringIndex :
+    case JVM_CONSTANT_MethodHandle :
+    case JVM_CONSTANT_MethodType :
+    case JVM_CONSTANT_Object :
+      return BasicType.T_OBJECT;
+    default:
+      throw new InternalError("unexpected tag: " + tag);
+    }
+  }
 }
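
The basicType() method added above maps the four primitive constant-pool tags to their primitive BasicType values and collapses every reference-like tag to T_OBJECT. A standalone sketch of the same mapping follows, using plain int tag values and strings in place of the SA's BasicType constants; all names here are placeholders.

    // Sketch only: mirrors the tag -> basic-type mapping with plain constants;
    // the real method returns sun.jvm.hotspot.runtime.BasicType values.
    final class ConstantTagMappingSketch {
        static String basicTypeFor(int tag) {
            switch (tag) {
                case 3:  return "T_INT";      // JVM_CONSTANT_Integer
                case 4:  return "T_FLOAT";    // JVM_CONSTANT_Float
                case 5:  return "T_LONG";     // JVM_CONSTANT_Long
                case 6:  return "T_DOUBLE";   // JVM_CONSTANT_Double
                case 7: case 8: case 15: case 16:          // Class, String, MethodHandle, MethodType
                case 100: case 101: case 102: case 103: case 104: case 105:
                    return "T_OBJECT";        // every reference-like tag collapses to T_OBJECT
                default:
                    throw new IllegalArgumentException("unexpected tag: " + tag);
            }
        }

        public static void main(String[] args) {
            System.out.println(basicTypeFor(5));   // T_LONG
            System.out.println(basicTypeFor(8));   // T_OBJECT (JVM_CONSTANT_String)
        }
    }
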
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/GenericGrowableArray.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.utilities;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class GenericGrowableArray extends VMObject {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("GenericGrowableArray");
+    _arena_field = type.getAddressField("_arena");
+    _max_field = new CIntField(type.getCIntegerField("_max"), 0);
+    _len_field = new CIntField(type.getCIntegerField("_len"), 0);
+  }
+
+  private static AddressField _arena_field;
+  private static CIntField _max_field;
+  private static CIntField _len_field;
+
+  public int max() {
+    return (int)_max_field.getValue(getAddress());
+  }
+
+  public int length() {
+    return (int)_len_field.getValue(getAddress());
+  }
+
+  public GenericGrowableArray(Address addr) {
+    super(addr);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/GrowableArray.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.utilities;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.types.*;
+
+public class GrowableArray<T> extends GenericGrowableArray {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    Type type      = db.lookupType("GrowableArray<int>");
+    dataField = type.getAddressField("_data");
+  }
+
+  private static AddressField dataField;
+
+  private InstanceConstructor<T> virtualConstructor;
+
+  public static <S> GrowableArray<S> create(Address addr, InstanceConstructor<S> v) {
+    if (addr == null) return null;
+    return new GrowableArray<S>(addr, v);
+  }
+
+  public T at(int i) {
+    if (i < 0 || i >= length()) throw new ArrayIndexOutOfBoundsException(i);
+    Address data = dataField.getValue(getAddress());
+    Address addr = data.getAddressAt(i * VM.getVM().getAddressSize());
+    if (addr == null) return null;
+    return (T) virtualConstructor.instantiateWrapperFor(addr);
+  }
+
+  private GrowableArray(Address addr, InstanceConstructor<T> v) {
+    super(addr);
+    virtualConstructor = v;
+  }
+}
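
The new GrowableArray wrapper lets the agent walk a VM-side GrowableArray<T> element by element. Below is a minimal usage sketch under the assumption that an SA session is already attached; the helper class, its method, and the arrayAddr/ctor parameters are hypothetical and not part of the SA.

    // Sketch only (assumes an attached SA session): copy a VM-side GrowableArray
    // into a java.util.List.  ArrayCopySketch, toList, arrayAddr and ctor are
    // hypothetical names, not part of the SA.
    import java.util.ArrayList;
    import java.util.List;
    import sun.jvm.hotspot.debugger.Address;
    import sun.jvm.hotspot.utilities.GrowableArray;
    import sun.jvm.hotspot.utilities.InstanceConstructor;

    final class ArrayCopySketch {
        static <T> List<T> toList(Address arrayAddr, InstanceConstructor<T> ctor) {
            List<T> out = new ArrayList<T>();
            GrowableArray<T> arr = GrowableArray.create(arrayAddr, ctor);  // null-safe factory
            if (arr == null) {
                return out;                          // a null address yields an empty list
            }
            for (int i = 0; i < arr.length(); i++) {
                out.add(arr.at(i));                  // at() bounds-checks; elements may be null
            }
            return out;
        }
    }
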
--- a/agent/test/jdi/sasanity.sh	Wed Sep 28 23:13:07 2011 +0100
+++ b/agent/test/jdi/sasanity.sh	Thu Dec 22 15:46:11 2011 +0000
@@ -76,5 +76,5 @@
   sleep 2
 done
 
-$jdk/bin/java -showversion ${OPTIONS} -classpath $javacp SASanityChecker $pid
+$jdk/bin/java -showversion ${OPTIONS} -classpath $javacp $* SASanityChecker $pid
 kill -9 $pid
--- a/make/Makefile	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/Makefile	Thu Dec 22 15:46:11 2011 +0000
@@ -346,6 +346,20 @@
 	$(install-file)
 $(EXPORT_SERVER_DIR)/64/%.so:    $(C2_DIR)/%.so
 	$(install-file)
+
+# Debug info for shared library
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(C1_DIR)/%.debuginfo
+	$(install-file)
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(C2_DIR)/%.debuginfo
+	$(install-file)
+$(EXPORT_CLIENT_DIR)/%.debuginfo:       $(C1_DIR)/%.debuginfo
+	$(install-file)
+$(EXPORT_CLIENT_DIR)/64/%.debuginfo:    $(C1_DIR)/%.debuginfo
+	$(install-file)
+$(EXPORT_SERVER_DIR)/%.debuginfo:       $(C2_DIR)/%.debuginfo
+	$(install-file)
+$(EXPORT_SERVER_DIR)/64/%.debuginfo:    $(C2_DIR)/%.debuginfo
+	$(install-file)
   endif
 endif
 
--- a/make/hotspot_version	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/hotspot_version	Thu Dec 22 15:46:11 2011 +0000
@@ -33,13 +33,13 @@
 # Don't put quotes (fail windows build).
 HOTSPOT_VM_COPYRIGHT=Copyright 2011
 
-HS_MAJOR_VER=21
+HS_MAJOR_VER=22
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=17
+HS_BUILD_NUMBER=10
 
 JDK_MAJOR_VER=1
-JDK_MINOR_VER=7
+JDK_MINOR_VER=8
 JDK_MICRO_VER=0
 
 # Previous (bootdir) JDK version
-JDK_PREVIOUS_VERSION=1.6.0
+JDK_PREVIOUS_VERSION=1.7.0
--- a/make/jprt.gmk	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/jprt.gmk	Thu Dec 22 15:46:11 2011 +0000
@@ -34,13 +34,13 @@
 endif
 
 jprt_build_productEmb:
-	$(MAKE) JAVASE_EMBEDDED=true jprt_build_product
+	$(MAKE) JAVASE_EMBEDDED=true MINIMIZE_RAM_USAGE=true jprt_build_product
 
 jprt_build_debugEmb:
-	$(MAKE) JAVASE_EMBEDDED=true jprt_build_debug
+	$(MAKE) JAVASE_EMBEDDED=true MINIMIZE_RAM_USAGE=true jprt_build_debug
 
 jprt_build_fastdebugEmb:
-	$(MAKE) JAVASE_EMBEDDED=true jprt_build_fastdebug
+	$(MAKE) JAVASE_EMBEDDED=true MINIMIZE_RAM_USAGE=true jprt_build_fastdebug
 
 jprt_build_productOpen:
 	$(MAKE) OPENJDK=true jprt_build_product
--- a/make/jprt.properties	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/jprt.properties	Thu Dec 22 15:46:11 2011 +0000
@@ -50,7 +50,7 @@
 #       sparc etc.
 
 # Define the Solaris platforms we want for the various releases
-
+jprt.my.solaris.sparc.jdk8=solaris_sparc_5.10
 jprt.my.solaris.sparc.jdk7=solaris_sparc_5.10
 jprt.my.solaris.sparc.jdk7b107=solaris_sparc_5.10
 jprt.my.solaris.sparc.jdk7temp=solaris_sparc_5.10
@@ -64,6 +64,7 @@
 jprt.my.solaris.sparc.ejdk6=${jprt.my.solaris.sparc.jdk6}
 jprt.my.solaris.sparc=${jprt.my.solaris.sparc.${jprt.tools.default.release}}
 
+jprt.my.solaris.sparcv9.jdk8=solaris_sparcv9_5.10
 jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10
 jprt.my.solaris.sparcv9.jdk7b107=solaris_sparcv9_5.10
 jprt.my.solaris.sparcv9.jdk7temp=solaris_sparcv9_5.10
@@ -77,6 +78,7 @@
 jprt.my.solaris.sparcv9.ejdk6=${jprt.my.solaris.sparcv9.jdk6}
 jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}
 
+jprt.my.solaris.i586.jdk8=solaris_i586_5.10
 jprt.my.solaris.i586.jdk7=solaris_i586_5.10
 jprt.my.solaris.i586.jdk7b107=solaris_i586_5.10
 jprt.my.solaris.i586.jdk7temp=solaris_i586_5.10
@@ -90,6 +92,7 @@
 jprt.my.solaris.i586.ejdk6=${jprt.my.solaris.i586.jdk6}
 jprt.my.solaris.i586=${jprt.my.solaris.i586.${jprt.tools.default.release}}
 
+jprt.my.solaris.x64.jdk8=solaris_x64_5.10
 jprt.my.solaris.x64.jdk7=solaris_x64_5.10
 jprt.my.solaris.x64.jdk7b107=solaris_x64_5.10
 jprt.my.solaris.x64.jdk7temp=solaris_x64_5.10
@@ -103,6 +106,7 @@
 jprt.my.solaris.x64.ejdk6=${jprt.my.solaris.x64.jdk6}
 jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}}
 
+jprt.my.linux.i586.jdk8=linux_i586_2.6
 jprt.my.linux.i586.jdk7=linux_i586_2.6
 jprt.my.linux.i586.jdk7b107=linux_i586_2.6
 jprt.my.linux.i586.jdk7temp=linux_i586_2.6
@@ -116,6 +120,7 @@
 jprt.my.linux.i586.ejdk6=linux_i586_2.6
 jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}}
 
+jprt.my.linux.x64.jdk8=linux_x64_2.6
 jprt.my.linux.x64.jdk7=linux_x64_2.6
 jprt.my.linux.x64.jdk7b107=linux_x64_2.6
 jprt.my.linux.x64.jdk7temp=linux_x64_2.6
@@ -129,6 +134,7 @@
 jprt.my.linux.x64.ejdk6=${jprt.my.linux.x64.jdk6}
 jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}}
 
+jprt.my.linux.ppc.jdk8=linux_ppc_2.6
 jprt.my.linux.ppc.jdk7=linux_ppc_2.6
 jprt.my.linux.ppc.jdk7b107=linux_ppc_2.6
 jprt.my.linux.ppc.jdk7temp=linux_ppc_2.6
@@ -136,6 +142,7 @@
 jprt.my.linux.ppc.ejdk7=linux_ppc_2.6
 jprt.my.linux.ppc=${jprt.my.linux.ppc.${jprt.tools.default.release}}
 
+jprt.my.linux.ppcv2.jdk8=linux_ppcv2_2.6
 jprt.my.linux.ppcv2.jdk7=linux_ppcv2_2.6
 jprt.my.linux.ppcv2.jdk7b107=linux_ppcv2_2.6
 jprt.my.linux.ppcv2.jdk7temp=linux_ppcv2_2.6
@@ -143,6 +150,7 @@
 jprt.my.linux.ppcv2.ejdk7=linux_ppcv2_2.6
 jprt.my.linux.ppcv2=${jprt.my.linux.ppcv2.${jprt.tools.default.release}}
 
+jprt.my.linux.ppcsflt.jdk8=linux_ppcsflt_2.6
 jprt.my.linux.ppcsflt.jdk7=linux_ppcsflt_2.6
 jprt.my.linux.ppcsflt.jdk7b107=linux_ppcsflt_2.6
 jprt.my.linux.ppcsflt.jdk7temp=linux_ppcsflt_2.6
@@ -150,6 +158,7 @@
 jprt.my.linux.ppcsflt.ejdk7=linux_ppcsflt_2.6
 jprt.my.linux.ppcsflt=${jprt.my.linux.ppcsflt.${jprt.tools.default.release}}
 
+jprt.my.linux.armvfp.jdk8=linux_armvfp_2.6
 jprt.my.linux.armvfp.jdk7=linux_armvfp_2.6
 jprt.my.linux.armvfp.jdk7b107=linux_armvfp_2.6
 jprt.my.linux.armvfp.jdk7temp=linux_armvfp_2.6
@@ -157,6 +166,7 @@
 jprt.my.linux.armvfp.ejdk7=linux_armvfp_2.6
 jprt.my.linux.armvfp=${jprt.my.linux.armvfp.${jprt.tools.default.release}}
 
+jprt.my.linux.armsflt.jdk8=linux_armsflt_2.6
 jprt.my.linux.armsflt.jdk7=linux_armsflt_2.6
 jprt.my.linux.armsflt.jdk7b107=linux_armsflt_2.6
 jprt.my.linux.armsflt.jdk7temp=linux_armsflt_2.6
@@ -164,6 +174,7 @@
 jprt.my.linux.armsflt.ejdk7=linux_armsflt_2.6
 jprt.my.linux.armsflt=${jprt.my.linux.armsflt.${jprt.tools.default.release}}
 
+jprt.my.windows.i586.jdk8=windows_i586_5.1
 jprt.my.windows.i586.jdk7=windows_i586_5.1
 jprt.my.windows.i586.jdk7b107=windows_i586_5.0
 jprt.my.windows.i586.jdk7temp=windows_i586_5.0
@@ -177,6 +188,7 @@
 jprt.my.windows.i586.ejdk6=${jprt.my.windows.i586.jdk6}
 jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
 
+jprt.my.windows.x64.jdk8=windows_x64_5.2
 jprt.my.windows.x64.jdk7=windows_x64_5.2
 jprt.my.windows.x64.jdk7b107=windows_x64_5.2
 jprt.my.windows.x64.jdk7temp=windows_x64_5.2
@@ -218,6 +230,7 @@
 jprt.build.targets.all=${jprt.build.targets.standard}, \
     ${jprt.build.targets.embedded}, ${jprt.build.targets.open}
 
+jprt.build.targets.jdk8=${jprt.build.targets.all}
 jprt.build.targets.jdk7=${jprt.build.targets.all}
 jprt.build.targets.jdk7temp=${jprt.build.targets.all}
 jprt.build.targets.jdk7b107=${jprt.build.targets.all}
@@ -494,6 +507,7 @@
   ${jprt.my.windows.x64.test.targets}
 
 
+jprt.test.targets.jdk8=${jprt.test.targets.standard}
 jprt.test.targets.jdk7=${jprt.test.targets.standard}
 jprt.test.targets.jdk7temp=${jprt.test.targets.standard}
 jprt.test.targets.jdk7b105=${jprt.test.targets.standard}
@@ -534,6 +548,7 @@
 jprt.make.rule.test.targets.embedded = \
   ${jprt.make.rule.test.targets.standard.client}
 
+jprt.make.rule.test.targets.jdk8=${jprt.make.rule.test.targets.standard}
 jprt.make.rule.test.targets.jdk7=${jprt.make.rule.test.targets.standard}
 jprt.make.rule.test.targets.jdk7temp=${jprt.make.rule.test.targets.standard}
 jprt.make.rule.test.targets.jdk7b107=${jprt.make.rule.test.targets.standard}
--- a/make/linux/Makefile	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/linux/Makefile	Thu Dec 22 15:46:11 2011 +0000
@@ -210,6 +210,7 @@
 BUILDTREE_MAKE    = $(GAMMADIR)/make/$(OSNAME)/makefiles/buildtree.make
 BUILDTREE_VARS    = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OSNAME) SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH)
 BUILDTREE_VARS   += HOTSPOT_RELEASE_VERSION=$(HOTSPOT_RELEASE_VERSION) HOTSPOT_BUILD_VERSION=$(HOTSPOT_BUILD_VERSION) JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION)
+BUILDTREE_VARS   += OBJCOPY=$(OBJCOPY) STRIP_POLICY=$(STRIP_POLICY)
 
 BUILDTREE         = $(MAKE) -f $(BUILDTREE_MAKE) $(BUILDTREE_VARS)
 
--- a/make/linux/makefiles/build_vm_def.sh	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/linux/makefiles/build_vm_def.sh	Thu Dec 22 15:46:11 2011 +0000
@@ -7,6 +7,10 @@
 NM=nm
 fi
 
-$NM --defined-only $* | awk '
-   { if ($3 ~ /^_ZTV/ || $3 ~ /^gHotSpotVM/) print "\t" $3 ";" }
-   '
+$NM --defined-only $* \
+    | awk '{
+              if ($3 ~ /^_ZTV/ || $3 ~ /^gHotSpotVM/) print "\t" $3 ";"
+              if ($3 ~ /^UseSharedSpaces$/) print "\t" $3 ";"
+              if ($3 ~ /^_ZN9Arguments17SharedArchivePathE$/) print "\t" $3 ";"
+          }' \
+    | sort -u
--- a/make/linux/makefiles/buildtree.make	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/linux/makefiles/buildtree.make	Thu Dec 22 15:46:11 2011 +0000
@@ -233,6 +233,10 @@
 	echo "$(call gamma-path,commonsrc,os/posix/vm)"; \
 	[ -n "$(CFLAGS_BROWSE)" ] && \
 	    echo && echo "CFLAGS_BROWSE = $(CFLAGS_BROWSE)"; \
+	[ -n "$(OBJCOPY)" ] && \
+	    echo && echo "OBJCOPY = $(OBJCOPY)"; \
+	[ -n "$(STRIP_POLICY)" ] && \
+	    echo && echo "STRIP_POLICY = $(STRIP_POLICY)"; \
 	[ -n "$(HOTSPOT_EXTRA_SYSDEFS)" ] && \
 	    echo && \
 	    echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \
--- a/make/linux/makefiles/defs.make	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/linux/makefiles/defs.make	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -114,6 +114,67 @@
   HS_ARCH          = ppc
 endif
 
+# determine if HotSpot is being built as part of JDK6 or an earlier JDK
+JDK6_OR_EARLIER=0
+ifeq "$(shell expr \( '$(JDK_MAJOR_VERSION)' != '' \& '$(JDK_MINOR_VERSION)' != '' \& '$(JDK_MICRO_VERSION)' != '' \))" "1"
+  # if the longer variable names (newer build style) are set, then check those
+  ifeq "$(shell expr \( $(JDK_MAJOR_VERSION) = 1 \& $(JDK_MINOR_VERSION) \< 7 \))" "1"
+    JDK6_OR_EARLIER=1
+  endif
+else
+  # the longer variables aren't set so check the shorter variable names
+  ifeq "$(shell expr \( '$(JDK_MAJOR_VER)' = 1 \& '$(JDK_MINOR_VER)' \< 7 \))" "1"
+    JDK6_OR_EARLIER=1
+  endif
+endif
+
+ifeq ($(JDK6_OR_EARLIER),0)
+  # Full Debug Symbols is supported on JDK7 or newer
+
+  # Default OBJCOPY comes from GNU Binutils on Linux:
+  DEF_OBJCOPY=/usr/bin/objcopy
+  ifdef CROSS_COMPILE_ARCH
+    # don't try to generate .debuginfo files when cross compiling
+    _JUNK_ := $(shell \
+      echo >&2 "INFO: cross compiling for ARCH $(CROSS_COMPILE_ARCH)," \
+        "skipping .debuginfo generation.")
+    OBJCOPY=
+  else
+    OBJCOPY=$(shell test -x $(DEF_OBJCOPY) && echo $(DEF_OBJCOPY))
+    ifneq ($(ALT_OBJCOPY),)
+      _JUNK_ := $(shell echo >&2 "INFO: ALT_OBJCOPY=$(ALT_OBJCOPY)")
+      # disable .debuginfo support by setting ALT_OBJCOPY to a non-existent path
+      OBJCOPY=$(shell test -x $(ALT_OBJCOPY) && echo $(ALT_OBJCOPY))
+    endif
+  endif
+  
+  ifeq ($(OBJCOPY),)
+    _JUNK_ := $(shell \
+      echo >&2 "INFO: no objcopy cmd found so cannot create .debuginfo files.")
+  else
+    _JUNK_ := $(shell \
+      echo >&2 "INFO: $(OBJCOPY) cmd found so will create .debuginfo files.")
+  
+    # Library stripping policies for .debuginfo configs:
+    #   all_strip - strips everything from the library
+    #   min_strip - strips most stuff from the library; leaves minimum symbols
+    #   no_strip  - does not strip the library at all
+    #
+    # Oracle security policy requires "all_strip". A waiver was granted on
+    # 2011.09.01 that permits using "min_strip" in the Java JDK and Java JRE.
+    #
+    DEF_STRIP_POLICY="min_strip"
+    ifeq ($(ALT_STRIP_POLICY),)
+      STRIP_POLICY=$(DEF_STRIP_POLICY)
+    else
+      STRIP_POLICY=$(ALT_STRIP_POLICY)
+    endif
+  
+    _JUNK_ := $(shell \
+      echo >&2 "INFO: STRIP_POLICY=$(STRIP_POLICY)")
+  endif
+endif
+
 JDK_INCLUDE_SUBDIR=linux
 
 # FIXUP: The subdirectory for a debug build is NOT the same on all platforms
@@ -123,18 +184,28 @@
 
 # client and server subdirectories have symbolic links to ../libjsig.so
 EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.so
+ifneq ($(OBJCOPY),)
+  EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.debuginfo
+endif
+
 EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
+EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
 
 ifndef BUILD_CLIENT_ONLY
 EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
 EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.so
+  ifneq ($(OBJCOPY),)
+    EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.debuginfo
+  endif
 endif
 
 ifneq ($(ZERO_BUILD), true)
   ifeq ($(ARCH_DATA_MODEL), 32)
-    EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
     EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
     EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.so 
+    ifneq ($(OBJCOPY),)
+      EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.debuginfo
+    endif
   endif
 endif
 
@@ -144,6 +215,10 @@
                         $(EXPORT_LIB_DIR)/sa-jdi.jar 
 ADD_SA_BINARIES/sparc = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.so \
                         $(EXPORT_LIB_DIR)/sa-jdi.jar 
+ifneq ($(OBJCOPY),)
+  ADD_SA_BINARIES/x86   += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo
+  ADD_SA_BINARIES/sparc += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo
+endif
 ADD_SA_BINARIES/ppc   = 
 ADD_SA_BINARIES/ia64  = 
 ADD_SA_BINARIES/arm   = 
--- a/make/linux/makefiles/gcc.make	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/linux/makefiles/gcc.make	Thu Dec 22 15:46:11 2011 +0000
@@ -244,6 +244,26 @@
 DEBUG_CFLAGS += -gstabs
 endif
 
+ifneq ($(OBJCOPY),)
+  FASTDEBUG_CFLAGS/ia64  = -g
+  FASTDEBUG_CFLAGS/amd64 = -g
+  FASTDEBUG_CFLAGS/arm   = -g
+  FASTDEBUG_CFLAGS/ppc   = -g
+  FASTDEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
+  ifeq ($(FASTDEBUG_CFLAGS/$(BUILDARCH)),)
+    FASTDEBUG_CFLAGS += -gstabs
+  endif
+
+  OPT_CFLAGS/ia64  = -g
+  OPT_CFLAGS/amd64 = -g
+  OPT_CFLAGS/arm   = -g
+  OPT_CFLAGS/ppc   = -g
+  OPT_CFLAGS += $(OPT_CFLAGS/$(BUILDARCH))
+  ifeq ($(OPT_CFLAGS/$(BUILDARCH)),)
+    OPT_CFLAGS += -gstabs
+  endif
+endif
+
 # DEBUG_BINARIES overrides everything, use full -g debug information
 ifeq ($(DEBUG_BINARIES), true)
   DEBUG_CFLAGS = -g
@@ -261,3 +281,9 @@
 ifdef MINIMIZE_RAM_USAGE
 CFLAGS += -DMINIMIZE_RAM_USAGE
 endif
+
+ifdef CROSS_COMPILE_ARCH
+  STRIP = $(ALT_COMPILER_PATH)/strip
+else
+  STRIP = strip
+endif
--- a/make/linux/makefiles/jsig.make	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/linux/makefiles/jsig.make	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -31,9 +31,13 @@
 JSIG_G    = $(JSIG)$(G_SUFFIX)
 LIBJSIG_G = lib$(JSIG_G).so
 
+LIBJSIG_DEBUGINFO   = lib$(JSIG).debuginfo
+LIBJSIG_G_DEBUGINFO = lib$(JSIG_G).debuginfo
+
 JSIGSRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/vm
 
-DEST_JSIG  = $(JDK_LIBDIR)/$(LIBJSIG)
+DEST_JSIG           = $(JDK_LIBDIR)/$(LIBJSIG)
+DEST_JSIG_DEBUGINFO = $(JDK_LIBDIR)/$(LIBJSIG_DEBUGINFO)
 
 LIBJSIG_MAPFILE = $(MAKEFILES_DIR)/mapfile-vers-jsig
 
@@ -54,9 +58,24 @@
 	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
                          $(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) -o $@ $< -ldl
 	$(QUIETLY) [ -f $(LIBJSIG_G) ] || { ln -s $@ $(LIBJSIG_G); }
+ifneq ($(OBJCOPY),)
+	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJSIG_DEBUGINFO)
+	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJSIG_DEBUGINFO) $@
+  ifeq ($(STRIP_POLICY),all_strip)
+	$(QUIETLY) $(STRIP) $@
+  else
+    ifeq ($(STRIP_POLICY),min_strip)
+	$(QUIETLY) $(STRIP) -g $@
+    # implied else here is no stripping at all
+    endif
+  endif
+	[ -f $(LIBJSIG_G_DEBUGINFO) ] || { ln -s $(LIBJSIG_DEBUGINFO) $(LIBJSIG_G_DEBUGINFO); }
+endif
 
 install_jsig: $(LIBJSIG)
 	@echo "Copying $(LIBJSIG) to $(DEST_JSIG)"
+	$(QUIETLY) test -f $(LIBJSIG_DEBUGINFO) && \
+	    cp -f $(LIBJSIG_DEBUGINFO) $(DEST_JSIG_DEBUGINFO)
 	$(QUIETLY) cp -f $(LIBJSIG) $(DEST_JSIG) && echo "Done"
 
 .PHONY: install_jsig
--- a/make/linux/makefiles/mapfile-vers-debug	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/linux/makefiles/mapfile-vers-debug	Thu Dec 22 15:46:11 2011 +0000
@@ -244,24 +244,6 @@
                 JVM_Yield;
                 JVM_handle_linux_signal;
 
-                # Old reflection routines
-                # These do not need to be present in the product build in JDK 1.4
-                # but their code has not been removed yet because there will not
-                # be a substantial code savings until JVM_InvokeMethod and
-                # JVM_NewInstanceFromConstructor can also be removed; see
-                # reflectionCompat.hpp.
-                JVM_GetClassConstructor;
-                JVM_GetClassConstructors;
-                JVM_GetClassField;
-                JVM_GetClassFields;
-                JVM_GetClassMethod;
-                JVM_GetClassMethods;
-                JVM_GetField;
-                JVM_GetPrimitiveField;
-                JVM_NewInstance;
-                JVM_SetField;
-                JVM_SetPrimitiveField;
-
                 # debug JVM
                 JVM_AccessVMBooleanFlag;
                 JVM_AccessVMIntFlag;
--- a/make/linux/makefiles/mapfile-vers-product	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/linux/makefiles/mapfile-vers-product	Thu Dec 22 15:46:11 2011 +0000
@@ -244,24 +244,6 @@
                 JVM_Yield;
                 JVM_handle_linux_signal;
 
-                # Old reflection routines
-                # These do not need to be present in the product build in JDK 1.4
-                # but their code has not been removed yet because there will not
-                # be a substantial code savings until JVM_InvokeMethod and
-                # JVM_NewInstanceFromConstructor can also be removed; see
-                # reflectionCompat.hpp.
-                JVM_GetClassConstructor;
-                JVM_GetClassConstructors;
-                JVM_GetClassField;
-                JVM_GetClassFields;
-                JVM_GetClassMethod;
-                JVM_GetClassMethods;
-                JVM_GetField;
-                JVM_GetPrimitiveField;
-                JVM_NewInstance;
-                JVM_SetField;
-                JVM_SetPrimitiveField;
-
                 # miscellaneous functions
                 jio_fprintf;
                 jio_printf;
--- a/make/linux/makefiles/product.make	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/linux/makefiles/product.make	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -46,13 +46,10 @@
 
 # use -g to strip library as -x will discard its symbol table; -x is fine for
 # executables.
-ifdef CROSS_COMPILE_ARCH
-  STRIP = $(ALT_COMPILER_PATH)/strip
-else
-  STRIP = strip
-endif
+# Note: these macros are not used in .debuginfo configs
 STRIP_LIBJVM = $(STRIP) -g $@ || exit 1;
 STRIP_AOUT   = $(STRIP) -x $@ || exit 1;
 
-# Don't strip in VM build; JDK build will strip libraries later
+# If we can create .debuginfo files, then the VM is stripped in vm.make
+# and this macro is not used.
 # LINK_LIB.CC/POST_HOOK += $(STRIP_$(LINK_INTO))
--- a/make/linux/makefiles/saproc.make	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/linux/makefiles/saproc.make	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,9 @@
 SAPROC_G = $(SAPROC)$(G_SUFFIX)
 LIBSAPROC_G = lib$(SAPROC_G).so
 
+LIBSAPROC_DEBUGINFO   = lib$(SAPROC).debuginfo
+LIBSAPROC_G_DEBUGINFO = lib$(SAPROC_G).debuginfo
+
 AGENT_DIR = $(GAMMADIR)/agent
 
 SASRCDIR = $(AGENT_DIR)/src/os/$(Platform_os_family)
@@ -45,7 +48,8 @@
 
 SAMAPFILE = $(SASRCDIR)/mapfile
 
-DEST_SAPROC = $(JDK_LIBDIR)/$(LIBSAPROC)
+DEST_SAPROC           = $(JDK_LIBDIR)/$(LIBSAPROC)
+DEST_SAPROC_DEBUGINFO = $(JDK_LIBDIR)/$(LIBSAPROC_DEBUGINFO)
 
 # DEBUG_BINARIES overrides everything, use full -g debug information
 ifeq ($(DEBUG_BINARIES), true)
@@ -82,10 +86,25 @@
 	           -o $@                                                \
 	           -lthread_db
 	$(QUIETLY) [ -f $(LIBSAPROC_G) ] || { ln -s $@ $(LIBSAPROC_G); }
+ifneq ($(OBJCOPY),)
+	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBSAPROC_DEBUGINFO)
+	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBSAPROC_DEBUGINFO) $@
+  ifeq ($(STRIP_POLICY),all_strip)
+	$(QUIETLY) $(STRIP) $@
+  else
+    ifeq ($(STRIP_POLICY),min_strip)
+	$(QUIETLY) $(STRIP) -g $@
+    # implied else here is no stripping at all
+    endif
+  endif
+	[ -f $(LIBSAPROC_G_DEBUGINFO) ] || { ln -s $(LIBSAPROC_DEBUGINFO) $(LIBSAPROC_G_DEBUGINFO); }
+endif
 
 install_saproc: $(BUILDLIBSAPROC)
 	$(QUIETLY) if [ -e $(LIBSAPROC) ] ; then             \
 	  echo "Copying $(LIBSAPROC) to $(DEST_SAPROC)";     \
+	  test -f $(LIBSAPROC_DEBUGINFO) &&                  \
+	    cp -f $(LIBSAPROC_DEBUGINFO) $(DEST_SAPROC_DEBUGINFO); \
 	  cp -f $(LIBSAPROC) $(DEST_SAPROC) && echo "Done";  \
 	fi
 
--- a/make/linux/makefiles/vm.make	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/linux/makefiles/vm.make	Thu Dec 22 15:46:11 2011 +0000
@@ -60,10 +60,16 @@
 # The order is important for the precompiled headers to work.
 INCLUDES += $(PRECOMPILED_HEADER_DIR:%=-I%) $(Src_Dirs_I:%=-I%)
 
-ifeq (${VERSION}, debug)
+# SYMFLAG is used by {jsig,saproc}.make
+ifneq ($(OBJCOPY),)
+  # always build with debug info when we can create .debuginfo files
   SYMFLAG = -g
 else
-  SYMFLAG =
+  ifeq (${VERSION}, debug)
+    SYMFLAG = -g
+  else
+    SYMFLAG =
+  endif
 endif
 
 # HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined 
@@ -132,6 +138,9 @@
 LIBJVM   = lib$(JVM).so
 LIBJVM_G = lib$(JVM)$(G_SUFFIX).so
 
+LIBJVM_DEBUGINFO   = lib$(JVM).debuginfo
+LIBJVM_G_DEBUGINFO = lib$(JVM)$(G_SUFFIX).debuginfo
+
 SPECIAL_PATHS:=adlc c1 gc_implementation opto shark libadt
 
 SOURCE_PATHS=\
@@ -317,11 +326,30 @@
 	      fi                                                        \
             fi 								\
 	}
+ifeq ($(CROSS_COMPILE_ARCH),)
+  ifneq ($(OBJCOPY),)
+	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DEBUGINFO)
+	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DEBUGINFO) $@
+    ifeq ($(STRIP_POLICY),all_strip)
+	$(QUIETLY) $(STRIP) $@
+    else
+      ifeq ($(STRIP_POLICY),min_strip)
+	$(QUIETLY) $(STRIP) -g $@
+      # implied else here is no stripping at all
+      endif
+    endif
+	$(QUIETLY) [ -f $(LIBJVM_G_DEBUGINFO) ] || ln -s $(LIBJVM_DEBUGINFO) $(LIBJVM_G_DEBUGINFO)
+  endif
+endif
 
-DEST_JVM = $(JDK_LIBDIR)/$(VM_SUBDIR)/$(LIBJVM)
+DEST_SUBDIR        = $(JDK_LIBDIR)/$(VM_SUBDIR)
+DEST_JVM           = $(DEST_SUBDIR)/$(LIBJVM)
+DEST_JVM_DEBUGINFO = $(DEST_SUBDIR)/$(LIBJVM_DEBUGINFO)
 
 install_jvm: $(LIBJVM)
 	@echo "Copying $(LIBJVM) to $(DEST_JVM)"
+	$(QUIETLY) test -f $(LIBJVM_DEBUGINFO) && \
+	    cp -f $(LIBJVM_DEBUGINFO) $(DEST_JVM_DEBUGINFO)
 	$(QUIETLY) cp -f $(LIBJVM) $(DEST_JVM) && echo "Done"
 
 #----------------------------------------------------------------------
--- a/make/sa.files	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/sa.files	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2003, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -46,6 +46,7 @@
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/bugspot/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/bugspot/tree/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/c1/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/ci/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/code/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/compiler/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/*.java \
@@ -54,9 +55,6 @@
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/cdbg/basic/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/cdbg/basic/x86/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/cdbg/basic/amd64/*.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/dbx/*.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/dbx/sparc/*.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/dbx/x86/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/dummy/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/ia64/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/*.java \
@@ -75,13 +73,13 @@
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/remote/sparc/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/remote/x86/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/sparc/*.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/win32/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/win32/coff/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/amd64/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/ia64/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/x86/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/x86/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_implementation/g1/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_implementation/parallelScavenge/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_implementation/shared/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_interface/*.java \
@@ -89,7 +87,9 @@
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/jdi/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/livejvm/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/memory/*.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/oops/*.java 
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/oops/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/opto/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/prims/*.java 
 
 
 AGENT_FILES2 = \
@@ -106,9 +106,6 @@
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/solaris_sparc/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/solaris_x86/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/sparc/*.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/win32_amd64/*.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/win32_ia64/*.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/win32_x86/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/x86/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/tools/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/tools/jcore/*.java \
--- a/make/solaris/Makefile	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/solaris/Makefile	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -168,6 +168,7 @@
 BUILDTREE_MAKE    = $(GAMMADIR)/make/$(OSNAME)/makefiles/buildtree.make
 BUILDTREE_VARS    = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OSNAME) ARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH)
 BUILDTREE_VARS   += HOTSPOT_RELEASE_VERSION=$(HOTSPOT_RELEASE_VERSION) HOTSPOT_BUILD_VERSION=$(HOTSPOT_BUILD_VERSION) JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION)
+BUILDTREE_VARS   += OBJCOPY=$(OBJCOPY) STRIP_POLICY=$(STRIP_POLICY)
 
 BUILDTREE         = $(MAKE) -f $(BUILDTREE_MAKE) $(BUILDTREE_VARS)
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/solaris/makefiles/build_vm_def.sh	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+/usr/ccs/bin/nm -p $* \
+    | awk '{
+          if ($2 == "U") next
+          if ($3 ~ /^__1c.*__vtbl_$/ || $3 ~ /^gHotSpotVM/) print "\t" $3 ";"
+          if ($3 ~ /^UseSharedSpaces$/) print "\t" $3 ";"
+          if ($3 ~ /^__1cJArgumentsRSharedArchivePath_$/) print "\t" $3 ";"
+          }' \
+    | sort -u
--- a/make/solaris/makefiles/buildtree.make	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/solaris/makefiles/buildtree.make	Thu Dec 22 15:46:11 2011 +0000
@@ -226,6 +226,10 @@
 	echo "$(call gamma-path,commonsrc,os/posix/vm)"; \
 	[ -n "$(CFLAGS_BROWSE)" ] && \
 	    echo && echo "CFLAGS_BROWSE = $(CFLAGS_BROWSE)"; \
+	[ -n "$(OBJCOPY)" ] && \
+	    echo && echo "OBJCOPY = $(OBJCOPY)"; \
+	[ -n "$(STRIP_POLICY)" ] && \
+	    echo && echo "STRIP_POLICY = $(STRIP_POLICY)"; \
 	[ -n "$(HOTSPOT_EXTRA_SYSDEFS)" ] && \
 	    echo && \
 	    echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \
--- a/make/solaris/makefiles/debug.make	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/solaris/makefiles/debug.make	Thu Dec 22 15:46:11 2011 +0000
@@ -41,8 +41,7 @@
 
 # Linker mapfiles
 MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \
-          $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug \
-          $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-nonproduct
+          $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug
 
 # This mapfile is only needed when compiling with dtrace support, 
 # and mustn't be otherwise.
--- a/make/solaris/makefiles/defs.make	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/solaris/makefiles/defs.make	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -59,6 +59,79 @@
   endif
 endif
 
+# determine if HotSpot is being built as part of JDK6 or an earlier JDK
+JDK6_OR_EARLIER=0
+ifeq "$(shell expr \( '$(JDK_MAJOR_VERSION)' != '' \& '$(JDK_MINOR_VERSION)' != '' \& '$(JDK_MICRO_VERSION)' != '' \))" "1"
+  # if the longer variable names (newer build style) are set, then check those
+  ifeq "$(shell expr \( $(JDK_MAJOR_VERSION) = 1 \& $(JDK_MINOR_VERSION) \< 7 \))" "1"
+    JDK6_OR_EARLIER=1
+  endif
+else
+  # the longer variables aren't set so check the shorter variable names
+  ifeq "$(shell expr \( '$(JDK_MAJOR_VER)' = 1 \& '$(JDK_MINOR_VER)' \< 7 \))" "1"
+    JDK6_OR_EARLIER=1
+  endif
+endif
+
+ifeq ($(JDK6_OR_EARLIER),0)
+  # Full Debug Symbols is supported on JDK7 or newer
+
+ifdef ENABLE_FULL_DEBUG_SYMBOLS
+  # Only check for Full Debug Symbols support on Solaris if it is
+  # specifically enabled. Hopefully, it can be enabled by default
+  # once the .debuginfo size issues are worked out.
+  
+  # Default OBJCOPY comes from the SUNWbinutils package:
+  DEF_OBJCOPY=/usr/sfw/bin/gobjcopy
+  ifeq ($(VM_PLATFORM),solaris_amd64)
+    # On Solaris AMD64/X64, gobjcopy is not happy and fails:
+    #
+    # /usr/sfw/bin/gobjcopy --add-gnu-debuglink=<lib>.debuginfo <lib>.so
+    # BFD: stKPaiop: Not enough room for program headers, try linking with -N
+    # /usr/sfw/bin/gobjcopy: stKPaiop: Bad value
+    # BFD: stKPaiop: Not enough room for program headers, try linking with -N
+    # /usr/sfw/bin/gobjcopy: libsaproc.debuginfo: Bad value
+    # BFD: stKPaiop: Not enough room for program headers, try linking with -N
+    # /usr/sfw/bin/gobjcopy: stKPaiop: Bad value
+    _JUNK_ := $(shell \
+      echo >&2 "INFO: $(DEF_OBJCOPY) is not working on Solaris AMD64/X64")
+    OBJCOPY=
+  else
+    OBJCOPY=$(shell test -x $(DEF_OBJCOPY) && echo $(DEF_OBJCOPY))
+    ifneq ($(ALT_OBJCOPY),)
+      _JUNK_ := $(shell echo >&2 "INFO: ALT_OBJCOPY=$(ALT_OBJCOPY)")
+      # disable .debuginfo support by setting ALT_OBJCOPY to a non-existent path
+      OBJCOPY=$(shell test -x $(ALT_OBJCOPY) && echo $(ALT_OBJCOPY))
+    endif
+  endif
+endif
+  
+  ifeq ($(OBJCOPY),)
+    _JUNK_ := $(shell \
+      echo >&2 "INFO: no objcopy cmd found so cannot create .debuginfo files.")
+  else
+    _JUNK_ := $(shell \
+      echo >&2 "INFO: $(OBJCOPY) cmd found so will create .debuginfo files.")
+  
+    # Library stripping policies for .debuginfo configs:
+    #   all_strip - strips everything from the library
+    #   min_strip - strips most stuff from the library; leaves minimum symbols
+    #   no_strip  - does not strip the library at all
+    #
+    # Oracle security policy requires "all_strip". A waiver was granted on
+    # 2011.09.01 that permits using "min_strip" in the Java JDK and Java JRE.
+    #
+    DEF_STRIP_POLICY="min_strip"
+    ifeq ($(ALT_STRIP_POLICY),)
+      STRIP_POLICY=$(DEF_STRIP_POLICY)
+    else
+      STRIP_POLICY=$(ALT_STRIP_POLICY)
+    endif
+    _JUNK_ := $(shell \
+      echo >&2 "INFO: STRIP_POLICY=$(STRIP_POLICY)")
+  endif
+endif
+
 JDK_INCLUDE_SUBDIR=solaris
 
 # FIXUP: The subdirectory for a debug build is NOT the same on all platforms
@@ -68,27 +141,50 @@
 
 # client and server subdirectories have symbolic links to ../libjsig.so
 EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.so
+ifneq ($(OBJCOPY),)
+  EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.debuginfo
+endif
 
 EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
+EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
+
 ifneq ($(BUILD_CLIENT_ONLY),true)
 EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
 EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.so
 EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm_db.so
 EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm_dtrace.so
+  ifneq ($(OBJCOPY),)
+    EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.debuginfo
+    EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm_db.debuginfo
+    EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm_dtrace.debuginfo
+  endif
 endif
 ifeq ($(ARCH_DATA_MODEL), 32)
-  EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.so 
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_db.so 
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_dtrace.so
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_db.so
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_dtrace.so
+  ifneq ($(OBJCOPY),)
+    EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.debuginfo 
+    EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_db.debuginfo 
+    EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_dtrace.debuginfo
+    EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_db.debuginfo
+    EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_dtrace.debuginfo
+  endif
   ifneq ($(BUILD_CLIENT_ONLY), true)
     EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_db.so
     EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_dtrace.so
+    ifneq ($(OBJCOPY),)
+      EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_db.debuginfo
+      EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_dtrace.debuginfo
+    endif
   endif
 endif
 
 EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.so
+ifneq ($(OBJCOPY),)
+  EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo
+endif
 EXPORT_LIST += $(EXPORT_LIB_DIR)/sa-jdi.jar 
--- a/make/solaris/makefiles/dtrace.make	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/solaris/makefiles/dtrace.make	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -41,10 +41,16 @@
 LIBJVM_DB = libjvm_db.so
 LIBJVM_DB_G = libjvm$(G_SUFFIX)_db.so
 
+LIBJVM_DB_DEBUGINFO = libjvm_db.debuginfo
+LIBJVM_DB_G_DEBUGINFO = libjvm$(G_SUFFIX)_db.debuginfo
+
 JVM_DTRACE = jvm_dtrace
 LIBJVM_DTRACE = libjvm_dtrace.so
 LIBJVM_DTRACE_G = libjvm$(G_SUFFIX)_dtrace.so
 
+LIBJVM_DTRACE_DEBUGINFO = libjvm_dtrace.debuginfo
+LIBJVM_DTRACE_G_DEBUGINFO = libjvm$(G_SUFFIX)_dtrace.debuginfo
+
 JVMOFFS = JvmOffsets
 JVMOFFS.o = $(JVMOFFS).o
 GENOFFS = generate$(JVMOFFS)
@@ -89,12 +95,30 @@
 XLIBJVM_DTRACE = 64/$(LIBJVM_DTRACE)
 XLIBJVM_DTRACE_G = 64/$(LIBJVM_DTRACE_G)
 
+XLIBJVM_DB_DEBUGINFO = 64/$(LIBJVM_DB_DEBUGINFO)
+XLIBJVM_DB_G_DEBUGINFO = 64/$(LIBJVM_DB_G_DEBUGINFO)
+XLIBJVM_DTRACE_DEBUGINFO = 64/$(LIBJVM_DTRACE_DEBUGINFO)
+XLIBJVM_DTRACE_G_DEBUGINFO = 64/$(LIBJVM_DTRACE_G_DEBUGINFO)
+
 $(XLIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS).h $(LIBJVM_DB_MAPFILE)
 	@echo Making $@
 	$(QUIETLY) mkdir -p 64/ ; \
 	$(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. -I$(GENERATED) \
 		$(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -lc
 	[ -f $(XLIBJVM_DB_G) ] || { ln -s $(LIBJVM_DB) $(XLIBJVM_DB_G); }
+ifneq ($(OBJCOPY),)
+	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(XLIBJVM_DB_DEBUGINFO)
+	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(XLIBJVM_DB_DEBUGINFO) $@
+  ifeq ($(STRIP_POLICY),all_strip)
+	$(QUIETLY) $(STRIP) $@
+  else
+    ifeq ($(STRIP_POLICY),min_strip)
+	$(QUIETLY) $(STRIP) -x $@
+    # implied else here is no stripping at all
+    endif
+  endif
+	[ -f $(XLIBJVM_DB_G_DEBUGINFO) ] || { ln -s $(LIBJVM_DB_DEBUGINFO) $(XLIBJVM_DB_G_DEBUGINFO); }
+endif
 
 $(XLIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
 	@echo Making $@
@@ -102,6 +126,19 @@
 	$(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. \
 		$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor
 	[ -f $(XLIBJVM_DTRACE_G) ] || { ln -s $(LIBJVM_DTRACE) $(XLIBJVM_DTRACE_G); }
+ifneq ($(OBJCOPY),)
+	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(XLIBJVM_DTRACE_DEBUGINFO)
+	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(XLIBJVM_DTRACE_DEBUGINFO) $@
+  ifeq ($(STRIP_POLICY),all_strip)
+	$(QUIETLY) $(STRIP) $@
+  else
+    ifeq ($(STRIP_POLICY),min_strip)
+	$(QUIETLY) $(STRIP) -x $@
+    # implied else here is no stripping at all
+    endif
+  endif
+	[ -f $(XLIBJVM_DTRACE_G_DEBUGINFO) ] || { ln -s $(LIBJVM_DTRACE_DEBUGINFO) $(XLIBJVM_DTRACE_G_DEBUGINFO); }
+endif
 
 endif # ifneq ("${ISA}","${BUILDARCH}")
 
@@ -148,12 +185,38 @@
 	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. -I$(GENERATED) \
 		$(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -lc
 	[ -f $(LIBJVM_DB_G) ] || { ln -s $@ $(LIBJVM_DB_G); }
+ifneq ($(OBJCOPY),)
+	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DB_DEBUGINFO)
+	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DB_DEBUGINFO) $@
+  ifeq ($(STRIP_POLICY),all_strip)
+	$(QUIETLY) $(STRIP) $@
+  else
+    ifeq ($(STRIP_POLICY),min_strip)
+	$(QUIETLY) $(STRIP) -x $@
+    # implied else here is no stripping at all
+    endif
+  endif
+	[ -f $(LIBJVM_DB_G_DEBUGINFO) ] || { ln -s $(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB_G_DEBUGINFO); }
+endif
 
 $(LIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(XLIBJVM_DTRACE) $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
 	@echo Making $@
 	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I.  \
 		$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor
 	[ -f $(LIBJVM_DTRACE_G) ] || { ln -s $@ $(LIBJVM_DTRACE_G); }
+ifneq ($(OBJCOPY),)
+	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DTRACE_DEBUGINFO)
+	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $@
+  ifeq ($(STRIP_POLICY),all_strip)
+	$(QUIETLY) $(STRIP) $@
+  else
+    ifeq ($(STRIP_POLICY),min_strip)
+	$(QUIETLY) $(STRIP) -x $@
+    # implied else here is no stripping at all
+    endif
+  endif
+	[ -f $(LIBJVM_DTRACE_G_DEBUGINFO) ] || { ln -s $(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE_G_DEBUGINFO); }
+endif
 
 $(DTRACE).d: $(DTRACE_SRCDIR)/hotspot.d $(DTRACE_SRCDIR)/hotspot_jni.d \
              $(DTRACE_SRCDIR)/hs_private.d $(DTRACE_SRCDIR)/jhelper.d
--- a/make/solaris/makefiles/fastdebug.make	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/solaris/makefiles/fastdebug.make	Thu Dec 22 15:46:11 2011 +0000
@@ -107,8 +107,7 @@
 
 # Linker mapfiles
 MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \
-	  $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug \
-	  $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-nonproduct
+	  $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug
 
 # This mapfile is only needed when compiling with dtrace support, 
 # and mustn't be otherwise.
--- a/make/solaris/makefiles/jsig.make	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/solaris/makefiles/jsig.make	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -31,9 +31,13 @@
 JSIG_G    = $(JSIG)$(G_SUFFIX)
 LIBJSIG_G = lib$(JSIG_G).so
 
+LIBJSIG_DEBUGINFO   = lib$(JSIG).debuginfo
+LIBJSIG_G_DEBUGINFO = lib$(JSIG_G).debuginfo
+
 JSIGSRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/vm
 
-DEST_JSIG  = $(JDK_LIBDIR)/$(LIBJSIG)
+DEST_JSIG           = $(JDK_LIBDIR)/$(LIBJSIG)
+DEST_JSIG_DEBUGINFO = $(JDK_LIBDIR)/$(LIBJSIG_DEBUGINFO)
 
 LIBJSIG_MAPFILE = $(MAKEFILES_DIR)/mapfile-vers-jsig
 
@@ -50,9 +54,24 @@
 	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
                          $(LFLAGS_JSIG) -o $@ $< -ldl
 	[ -f $(LIBJSIG_G) ] || { ln -s $@ $(LIBJSIG_G); }
+ifneq ($(OBJCOPY),)
+	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJSIG_DEBUGINFO)
+	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJSIG_DEBUGINFO) $@
+  ifeq ($(STRIP_POLICY),all_strip)
+	$(QUIETLY) $(STRIP) $@
+  else
+    ifeq ($(STRIP_POLICY),min_strip)
+	$(QUIETLY) $(STRIP) -x $@
+    # implied else here is no stripping at all
+    endif
+  endif
+	[ -f $(LIBJSIG_G_DEBUGINFO) ] || { ln -s $(LIBJSIG_DEBUGINFO) $(LIBJSIG_G_DEBUGINFO); }
+endif
 
 install_jsig: $(LIBJSIG)
 	@echo "Copying $(LIBJSIG) to $(DEST_JSIG)"
+	$(QUIETLY) test -f $(LIBJSIG_DEBUGINFO) && \
+	    cp -f $(LIBJSIG_DEBUGINFO) $(DEST_JSIG_DEBUGINFO)
 	$(QUIETLY) cp -f $(LIBJSIG) $(DEST_JSIG) && echo "Done"
 
 .PHONY: install_jsig
--- a/make/solaris/makefiles/jvmg.make	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/solaris/makefiles/jvmg.make	Thu Dec 22 15:46:11 2011 +0000
@@ -44,8 +44,7 @@
 
 # Linker mapfiles
 MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \
-          $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug \
-          $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-nonproduct
+          $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug
 
 # This mapfile is only needed when compiling with dtrace support,
 # and mustn't be otherwise.
--- a/make/solaris/makefiles/mapfile-vers	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/solaris/makefiles/mapfile-vers	Thu Dec 22 15:46:11 2011 +0000
@@ -3,7 +3,7 @@
 #
 
 #
-# Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -256,6 +256,9 @@
 
 		# This is for Forte Analyzer profiling support.
 		AsyncGetCallTrace;
+
+		# INSERT VTABLE SYMBOLS HERE
+
         local:
                 *;
 };
--- a/make/solaris/makefiles/mapfile-vers-nonproduct	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,48 +0,0 @@
-#
-
-#
-# Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#  
-#
-
-# Define public interface.
-
-SUNWprivate_1.1 {
-        global:
-		# Old reflection routines
-		# These do not need to be present in the product build in JDK 1.4
-		# but their code has not been removed yet because there will not
-		# be a substantial code savings until JVM_InvokeMethod and
-		# JVM_NewInstanceFromConstructor can also be removed; see
-		# reflectionCompat.hpp.
-		JVM_GetClassConstructor;
-		JVM_GetClassConstructors;
-		JVM_GetClassField;
-		JVM_GetClassFields;
-		JVM_GetClassMethod;
-		JVM_GetClassMethods;
-		JVM_GetField;
-		JVM_GetPrimitiveField;
-		JVM_NewInstance;
-		JVM_SetField;
-		JVM_SetPrimitiveField;
-};
--- a/make/solaris/makefiles/optimized.make	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/solaris/makefiles/optimized.make	Thu Dec 22 15:46:11 2011 +0000
@@ -48,9 +48,7 @@
 CFLAGS$(HOTSPARC_GENERIC) += $(OPT_CFLAGS/BYFILE)
 
 # Linker mapfiles
-# NOTE: inclusion of nonproduct mapfile not necessary; read it for details
-MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \
-          $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-nonproduct
+MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers
 
 # This mapfile is only needed when compiling with dtrace support, 
 # and mustn't be otherwise.
--- a/make/solaris/makefiles/product.make	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/solaris/makefiles/product.make	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -58,13 +58,9 @@
 # to inhibit the effect of the previous line on CFLAGS.
 
 # Linker mapfiles
-# NOTE: inclusion of nonproduct mapfile not necessary; read it for details
-ifdef USE_GCC
 MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers
-else
-MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \
-          $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-nonproduct
 
+ifndef USE_GCC
 # This mapfile is only needed when compiling with dtrace support, 
 # and mustn't be otherwise.
 MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE)
@@ -72,7 +68,8 @@
 REORDERFILE = $(GAMMADIR)/make/solaris/makefiles/reorder_$(TYPE)_$(BUILDARCH)
 endif
 
-# Don't strip in VM build; JDK build will strip libraries later
+# If we can create .debuginfo files, then the VM is stripped in vm.make
+# and this macro is not used.
 # LINK_LIB.CC/POST_HOOK += $(STRIP_LIB.CC/POST_HOOK)
 
 G_SUFFIX =
--- a/make/solaris/makefiles/saproc.make	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/solaris/makefiles/saproc.make	Thu Dec 22 15:46:11 2011 +0000
@@ -32,6 +32,9 @@
 SAPROC_G = $(SAPROC)$(G_SUFFIX)
 LIBSAPROC_G = lib$(SAPROC_G).so
 
+LIBSAPROC_DEBUGINFO   = lib$(SAPROC).debuginfo
+LIBSAPROC_G_DEBUGINFO = lib$(SAPROC_G).debuginfo
+
 AGENT_DIR = $(GAMMADIR)/agent
 
 SASRCDIR = $(AGENT_DIR)/src/os/$(Platform_os_family)/proc
@@ -40,7 +43,8 @@
 
 SAMAPFILE = $(SASRCDIR)/mapfile
 
-DEST_SAPROC = $(JDK_LIBDIR)/$(LIBSAPROC)
+DEST_SAPROC           = $(JDK_LIBDIR)/$(LIBSAPROC)
+DEST_SAPROC_DEBUGINFO = $(JDK_LIBDIR)/$(LIBSAPROC_DEBUGINFO)
 
 # if $(AGENT_DIR) does not exist, we don't build SA
 
@@ -72,9 +76,9 @@
           -e '/^[0-4]\. /b' \
           -e '/^5\.[0-9] /b' \
           -e '/^5\.10 /b' \
-          -e '/ snv_[0-9][0-9]$/b' \
-          -e '/ snv_[01][0-4][0-9]$/b' \
-          -e '/ snv_15[0-8]$/b' \
+          -e '/ snv_[0-9][0-9]$$/b' \
+          -e '/ snv_[01][0-4][0-9]$$/b' \
+          -e '/ snv_15[0-8]$$/b' \
           -e 's/.*/-DSOLARIS_11_B159_OR_LATER/' \
           -e 'p' \
           )
@@ -101,10 +105,25 @@
 	           -o $@                                                \
 	           -ldl -ldemangle -lthread -lc
 	[ -f $(LIBSAPROC_G) ] || { ln -s $@ $(LIBSAPROC_G); }
+ifneq ($(OBJCOPY),)
+	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBSAPROC_DEBUGINFO)
+	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBSAPROC_DEBUGINFO) $@
+  ifeq ($(STRIP_POLICY),all_strip)
+	$(QUIETLY) $(STRIP) $@
+  else
+    ifeq ($(STRIP_POLICY),min_strip)
+	$(QUIETLY) $(STRIP) -x $@
+    # implied else here is no stripping at all
+    endif
+  endif
+	[ -f $(LIBSAPROC_G_DEBUGINFO) ] || { ln -s $(LIBSAPROC_DEBUGINFO) $(LIBSAPROC_G_DEBUGINFO); }
+endif
 
 install_saproc: $(BULDLIBSAPROC)
 	$(QUIETLY) if [ -f $(LIBSAPROC) ] ; then             \
 	  echo "Copying $(LIBSAPROC) to $(DEST_SAPROC)";     \
+	  test -f $(LIBSAPROC_DEBUGINFO) &&             \
+	    cp -f $(LIBSAPROC_DEBUGINFO) $(DEST_SAPROC_DEBUGINFO); \
 	  cp -f $(LIBSAPROC) $(DEST_SAPROC) && echo "Done";  \
 	fi
 
--- a/make/solaris/makefiles/sparcWorks.make	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/solaris/makefiles/sparcWorks.make	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -148,6 +148,9 @@
 # -DDONT_USE_PRECOMPILED_HEADER will exclude all includes in precompiled.hpp.
 CFLAGS += -DDONT_USE_PRECOMPILED_HEADER
 
+# Compiler warnings are treated as errors
+CFLAGS_WARN = -xwe
+
 ################################################
 # Begin current (>=5.9) Forte compiler options #
 #################################################
@@ -478,9 +481,18 @@
 endif
 
 # Flags for Debugging
+# The -g0 setting allows the C++ frontend to inline, which is a big win.
+# The -xs setting disables 'lazy debug info' which puts everything in
+# the .so instead of requiring the '.o' files.
+ifneq ($(OBJCOPY),)
+  OPT_CFLAGS += -g0 -xs
+endif
 DEBUG_CFLAGS = -g
 FASTDEBUG_CFLAGS = -g0
-# The -g0 setting allows the C++ frontend to inline, which is a big win.
+ifneq ($(OBJCOPY),)
+  DEBUG_CFLAGS += -xs
+  FASTDEBUG_CFLAGS += -xs
+endif
 
 # Special global options for SS12
 ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
@@ -499,6 +511,9 @@
 # data using a unique globalization prefix. Instead force the use of
 # a static globalization prefix based on the source filepath so the
 # objects from two identical compilations are the same.
+# EXTRA_CFLAGS only covers vm_version.cpp for some reason
+#EXTRA_CFLAGS += -Qoption ccfe -xglobalstatic
+#OPT_CFLAGS += -Qoption ccfe -xglobalstatic
 #DEBUG_CFLAGS += -Qoption ccfe -xglobalstatic
 #FASTDEBUG_CFLAGS += -Qoption ccfe -xglobalstatic
 
@@ -559,6 +574,8 @@
 # since the hook must terminate itself as a valid command.)
 
 # Also, strip debug and line number information (worth about 1.7Mb).
+# If we can create .debuginfo files, then the VM is stripped in vm.make
+# and this macro is not used.
 STRIP_LIB.CC/POST_HOOK = $(STRIP) -x $@ || exit 1;
 # STRIP_LIB.CC/POST_HOOK is incorporated into LINK_LIB.CC/POST_HOOK
 # in certain configurations, such as product.make.  Other configurations,
--- a/make/solaris/makefiles/vm.make	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/solaris/makefiles/vm.make	Thu Dec 22 15:46:11 2011 +0000
@@ -55,10 +55,17 @@
 Src_Dirs_I += $(GENERATED)
 INCLUDES += $(Src_Dirs_I:%=-I%)
 
-ifeq (${VERSION}, debug)
-  SYMFLAG = -g
+# SYMFLAG is used by {dtrace,jsig,saproc}.make.
+ifneq ($(OBJCOPY),)
+  # always build with debug info when we can create .debuginfo files
+  # and disable 'lazy debug info' so the .so has everything.
+  SYMFLAG = -g -xs
 else
-  SYMFLAG =
+  ifeq (${VERSION}, debug)
+    SYMFLAG = -g
+  else
+    SYMFLAG =
+  endif
 endif
 
 # The following variables are defined in the generated flags.make file.
@@ -148,6 +155,9 @@
 LIBJVM   = lib$(JVM).so
 LIBJVM_G = lib$(JVM)$(G_SUFFIX).so
 
+LIBJVM_DEBUGINFO   = lib$(JVM).debuginfo
+LIBJVM_G_DEBUGINFO = lib$(JVM)$(G_SUFFIX).debuginfo
+
 SPECIAL_PATHS:=adlc c1 dist gc_implementation opto shark libadt
 
 SOURCE_PATHS=\
@@ -220,14 +230,24 @@
 
 vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
 
-mapfile : $(MAPFILE) $(MAPFILE_DTRACE_OPT)
+mapfile : $(MAPFILE) $(MAPFILE_DTRACE_OPT) vm.def
 	rm -f $@
-	cat $^ > $@
+	cat $(MAPFILE) $(MAPFILE_DTRACE_OPT) \
+	    | $(NAWK) '{                                         \
+	              if ($$0 ~ "INSERT VTABLE SYMBOLS HERE") {  \
+	                  system ("cat vm.def");                 \
+	              } else {                                   \
+	                  print $$0;                             \
+	              }                                          \
+	          }' > $@
 
 mapfile_reorder : mapfile $(MAPFILE_DTRACE_OPT) $(REORDERFILE)
 	rm -f $@
 	cat $^ > $@
 
+vm.def: $(Obj_Files)
+	sh $(GAMMADIR)/make/solaris/makefiles/build_vm_def.sh *.o > $@
+
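The mapfile rule above no longer simply concatenates its inputs: it splices the generated vm.def (vtable symbols pulled out of the object files by build_vm_def.sh) into the linker mapfile wherever the "INSERT VTABLE SYMBOLS HERE" marker added to mapfile-vers appears. A minimal C++ sketch of the same marker-replacement logic, included only to clarify what the nawk one-liner does (the file names are placeholders):

    #include <fstream>
    #include <iostream>
    #include <string>

    // Copy the mapfile template to stdout, replacing the marker line with the
    // contents of vm.def -- the same effect as the nawk recipe in the rule above.
    int main() {
        std::ifstream mapfile("mapfile-vers");
        for (std::string line; std::getline(mapfile, line); ) {
            if (line.find("INSERT VTABLE SYMBOLS HERE") != std::string::npos) {
                std::ifstream vmdef("vm.def");
                for (std::string sym; std::getline(vmdef, sym); )
                    std::cout << sym << '\n';
            } else {
                std::cout << line << '\n';
            }
        }
    }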
 ifeq ($(LINK_INTO),AOUT)
   LIBJVM.o                 =
   LIBJVM_MAPFILE           =
@@ -263,13 +283,30 @@
 	$(QUIETLY) rm -f $@.1 && ln -s $@ $@.1
 	$(QUIETLY) [ -f $(LIBJVM_G) ] || ln -s $@ $(LIBJVM_G)
 	$(QUIETLY) [ -f $(LIBJVM_G).1 ] || ln -s $@.1 $(LIBJVM_G).1
+ifneq ($(OBJCOPY),)
+	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DEBUGINFO)
+	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DEBUGINFO) $@
+  ifeq ($(STRIP_POLICY),all_strip)
+	$(QUIETLY) $(STRIP) $@
+  else
+    ifeq ($(STRIP_POLICY),min_strip)
+	$(QUIETLY) $(STRIP) -x $@
+    # implied else here is no stripping at all
+    endif
+  endif
+	$(QUIETLY) [ -f $(LIBJVM_G_DEBUGINFO) ] || ln -s $(LIBJVM_DEBUGINFO) $(LIBJVM_G_DEBUGINFO)
+endif
 endif # filter -sbfast -xsbfast
 
 
-DEST_JVM = $(JDK_LIBDIR)/$(VM_SUBDIR)/$(LIBJVM)
+DEST_SUBDIR        = $(JDK_LIBDIR)/$(VM_SUBDIR)
+DEST_JVM           = $(DEST_SUBDIR)/$(LIBJVM)
+DEST_JVM_DEBUGINFO = $(DEST_SUBDIR)/$(LIBJVM_DEBUGINFO)
 
 install_jvm: $(LIBJVM)
 	@echo "Copying $(LIBJVM) to $(DEST_JVM)"
+	$(QUIETLY) test -f $(LIBJVM_DEBUGINFO) && \
+	    cp -f $(LIBJVM_DEBUGINFO) $(DEST_JVM_DEBUGINFO)
 	$(QUIETLY) cp -f $(LIBJVM) $(DEST_JVM) && echo "Done"
 
 #----------------------------------------------------------------------
--- a/make/windows/makefiles/defs.make	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/windows/makefiles/defs.make	Thu Dec 22 15:46:11 2011 +0000
@@ -171,19 +171,20 @@
 endif
 
 EXPORT_SERVER_DIR = $(EXPORT_JRE_BIN_DIR)/server
+EXPORT_CLIENT_DIR = $(EXPORT_JRE_BIN_DIR)/client
+EXPORT_KERNEL_DIR = $(EXPORT_JRE_BIN_DIR)/kernel
+
 EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
 EXPORT_LIST += $(EXPORT_SERVER_DIR)/jvm.dll
 EXPORT_LIST += $(EXPORT_SERVER_DIR)/jvm.pdb
 EXPORT_LIST += $(EXPORT_SERVER_DIR)/jvm.map
 EXPORT_LIST += $(EXPORT_LIB_DIR)/jvm.lib
 ifeq ($(ARCH_DATA_MODEL), 32)
-  EXPORT_CLIENT_DIR = $(EXPORT_JRE_BIN_DIR)/client
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/jvm.dll
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/jvm.pdb
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/jvm.map
   # kernel vm
-  EXPORT_KERNEL_DIR = $(EXPORT_JRE_BIN_DIR)/kernel
   EXPORT_LIST += $(EXPORT_KERNEL_DIR)/Xusage.txt
   EXPORT_LIST += $(EXPORT_KERNEL_DIR)/jvm.dll
   EXPORT_LIST += $(EXPORT_KERNEL_DIR)/jvm.pdb
--- a/make/windows/makefiles/sa.make	Wed Sep 28 23:13:07 2011 +0100
+++ b/make/windows/makefiles/sa.make	Thu Dec 22 15:46:11 2011 +0000
@@ -66,7 +66,7 @@
 	$(QUIETLY) mkdir $(SA_CLASSDIR)\sun\jvm\hotspot\ui\resources
 	$(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/resources/*.png $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources
 	$(QUIETLY) cp -r $(AGENT_SRC_DIR)/images/* $(SA_CLASSDIR)
-	$(RUN_JAR) cf $@ -C saclasses .
+	$(RUN_JAR) cf $@ -C $(SA_CLASSDIR) .
 	$(RUN_JAR) uf $@ -C $(AGENT_SRC_DIR:/=\) META-INF\services\com.sun.jdi.connect.Connector
 	$(RUN_JAVAH) -classpath $(SA_CLASSDIR) -jni sun.jvm.hotspot.debugger.windbg.WindbgDebuggerLocal
 	$(RUN_JAVAH) -classpath $(SA_CLASSDIR) -jni sun.jvm.hotspot.debugger.x86.X86ThreadContext 
--- a/src/cpu/sparc/vm/assembler_sparc.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/sparc/vm/assembler_sparc.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -100,12 +100,19 @@
   case call_op:    s = "call"; break;
   case branch_op:
     switch (inv_op2(inst)) {
-      case bpr_op2:    s = "bpr";  break;
       case fb_op2:     s = "fb";   break;
       case fbp_op2:    s = "fbp";  break;
       case br_op2:     s = "br";   break;
       case bp_op2:     s = "bp";   break;
       case cb_op2:     s = "cb";   break;
+      case bpr_op2: {
+        if (is_cbcond(inst)) {
+          s = is_cxb(inst) ? "cxb" : "cwb";
+        } else {
+          s = "bpr";
+        }
+        break;
+      }
       default:         s = "????"; break;
     }
   }
@@ -127,12 +134,21 @@
   case call_op:    m = wdisp(word_aligned_ones, 0, 30);  v = wdisp(dest_pos, inst_pos, 30); break;
   case branch_op:
     switch (inv_op2(inst)) {
-      case bpr_op2:    m = wdisp16(word_aligned_ones, 0);      v = wdisp16(dest_pos, inst_pos);     break;
       case fbp_op2:    m = wdisp(  word_aligned_ones, 0, 19);  v = wdisp(  dest_pos, inst_pos, 19); break;
       case bp_op2:     m = wdisp(  word_aligned_ones, 0, 19);  v = wdisp(  dest_pos, inst_pos, 19); break;
       case fb_op2:     m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
       case br_op2:     m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
       case cb_op2:     m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
+      case bpr_op2: {
+        if (is_cbcond(inst)) {
+          m = wdisp10(word_aligned_ones, 0);
+          v = wdisp10(dest_pos, inst_pos);
+        } else {
+          m = wdisp16(word_aligned_ones, 0);
+          v = wdisp16(dest_pos, inst_pos);
+        }
+        break;
+      }
       default: ShouldNotReachHere();
     }
   }
@@ -149,12 +165,19 @@
   case call_op:        r = inv_wdisp(inst, pos, 30);  break;
   case branch_op:
     switch (inv_op2(inst)) {
-      case bpr_op2:    r = inv_wdisp16(inst, pos);    break;
       case fbp_op2:    r = inv_wdisp(  inst, pos, 19);  break;
       case bp_op2:     r = inv_wdisp(  inst, pos, 19);  break;
       case fb_op2:     r = inv_wdisp(  inst, pos, 22);  break;
       case br_op2:     r = inv_wdisp(  inst, pos, 22);  break;
       case cb_op2:     r = inv_wdisp(  inst, pos, 22);  break;
+      case bpr_op2: {
+        if (is_cbcond(inst)) {
+          r = inv_wdisp10(inst, pos);
+        } else {
+          r = inv_wdisp16(inst, pos);
+        }
+        break;
+      }
       default: ShouldNotReachHere();
     }
   }
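In all three switches above, bpr_op2 may now also be a cbcond (compare-and-branch) instruction; is_cbcond() tells the two apart, and a cbcond carries only a 10-bit word displacement (wdisp10/inv_wdisp10) where bpr has 16 bits. The following self-contained sketch shows the resulting branch reach, under the assumption that the displacement is a signed count of 4-byte instruction words:

    #include <cstdint>
    #include <cstdio>

    // Sign-extend an n-bit field, as inv_wdisp10/inv_wdisp16 must do after
    // extracting the raw displacement bits from the instruction word.
    static int32_t sign_extend(uint32_t field, int bits) {
        uint32_t sign_bit = 1u << (bits - 1);
        return int32_t((field ^ sign_bit) - sign_bit);
    }

    int main() {
        // Most negative .. most positive encodable displacement, in bytes.
        std::printf("10-bit (cbcond): %d .. %d bytes\n",
                    sign_extend(1u << 9, 10) * 4, sign_extend((1u << 9) - 1, 10) * 4);
        std::printf("16-bit (bpr)   : %d .. %d bytes\n",
                    sign_extend(1u << 15, 16) * 4, sign_extend((1u << 15) - 1, 16) * 4);
    }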
@@ -968,13 +991,7 @@
   Label PcOk;
   save_frame(0);                // to avoid clobbering O0
   ld_ptr(pc_addr, L0);
-  tst(L0);
-#ifdef _LP64
-  brx(Assembler::zero, false, Assembler::pt, PcOk);
-#else
-  br(Assembler::zero, false, Assembler::pt, PcOk);
-#endif // _LP64
-  delayed() -> nop();
+  br_null_short(L0, Assembler::pt, PcOk);
   stop("last_Java_pc not zeroed before leaving Java");
   bind(PcOk);
 
@@ -1003,7 +1020,7 @@
   Label StackOk;
   andcc(last_java_sp, 0x01, G0);
   br(Assembler::notZero, false, Assembler::pt, StackOk);
-  delayed() -> nop();
+  delayed()->nop();
   stop("Stack Not Biased in set_last_Java_frame");
   bind(StackOk);
 #endif // ASSERT
@@ -1099,8 +1116,7 @@
 
   Address exception_addr(G2_thread, Thread::pending_exception_offset());
   ld_ptr(exception_addr, scratch_reg);
-  br_null(scratch_reg,false,pt,L);
-  delayed()->nop();
+  br_null_short(scratch_reg, pt, L);
   // we use O7 linkage so that forward_exception_entry has the issuing PC
   call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
   delayed()->nop();
@@ -1778,7 +1794,8 @@
   mov(reg,O0); // Move arg into O0; arg might be in O7 which is about to be crushed
   stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);
 
-  set((intptr_t)real_msg, O1);
+  // Size of set() should stay the same
+  patchable_set((intptr_t)real_msg, O1);
   // Load address to call to into O7
   load_ptr_contents(a, O7);
   // Register call to verify_oop_subroutine
@@ -1815,7 +1832,8 @@
   ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed
   stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);
 
-  set((intptr_t)real_msg, O1);
+  // Size of set() should stay the same
+  patchable_set((intptr_t)real_msg, O1);
   // Load address to call to into O7
   load_ptr_contents(a, O7);
   // Register call to verify_oop_subroutine
@@ -1874,14 +1892,11 @@
 
   // assert((obj & oop_mask) == oop_bits);
   and3(O0_obj, O2_mask, O4_temp);
-  cmp(O4_temp, O3_bits);
-  brx(notEqual, false, pn, null_or_fail);
-  delayed()->nop();
+  cmp_and_brx_short(O4_temp, O3_bits, notEqual, pn, null_or_fail);
 
   if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) {
     // the null_or_fail case is useless; must test for null separately
-    br_null(O0_obj, false, pn, succeed);
-    delayed()->nop();
+    br_null_short(O0_obj, pn, succeed);
   }
 
   // Check the klassOop of this object for being in the right area of memory.
@@ -1893,9 +1908,7 @@
   if( Universe::verify_klass_bits() != Universe::verify_oop_bits() )
     set(Universe::verify_klass_bits(), O3_bits);
   and3(O0_obj, O2_mask, O4_temp);
-  cmp(O4_temp, O3_bits);
-  brx(notEqual, false, pn, fail);
-  delayed()->nop();
+  cmp_and_brx_short(O4_temp, O3_bits, notEqual, pn, fail);
   // Check the klass's klass
   load_klass(O0_obj, O0_obj);
   and3(O0_obj, O2_mask, O4_temp);
@@ -1965,7 +1978,8 @@
     save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
 
     // stop_subroutine expects message pointer in I1.
-    set((intptr_t)msg, O1);
+    // Size of set() should stay the same
+    patchable_set((intptr_t)msg, O1);
 
     // factor long stop-sequence into subroutine to save space
     assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");
@@ -1987,7 +2001,8 @@
   save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
   RegistersForDebugging::save_registers(this);
   mov(O0, L0);
-  set((intptr_t)msg, O0);
+  // Size of set() should stay the same
+  patchable_set((intptr_t)msg, O0);
   call( CAST_FROM_FN_PTR(address, warning) );
   delayed()->nop();
 //  ret();
@@ -2122,13 +2137,12 @@
   return Assembler::rc_z;
 }
 
-// compares register with zero and branches.  NOT FOR USE WITH 64-bit POINTERS
-void MacroAssembler::br_zero( Condition c, bool a, Predict p, Register s1, Label& L) {
+// compares (32 bit) register with zero and branches.  NOT FOR USE WITH 64-bit POINTERS
+void MacroAssembler::cmp_zero_and_br(Condition c, Register s1, Label& L, bool a, Predict p) {
   tst(s1);
   br (c, a, p, L);
 }
 
-
 // Compares a pointer register with zero and branches on null.
 // Does a test & branch on 32-bit systems and a register-branch on 64-bit.
 void MacroAssembler::br_null( Register s1, bool a, Predict p, Label& L ) {
@@ -2151,27 +2165,91 @@
 #endif
 }
 
-void MacroAssembler::br_on_reg_cond( RCondition rc, bool a, Predict p,
-                                     Register s1, address d,
-                                     relocInfo::relocType rt ) {
-  if (VM_Version::v9_instructions_work()) {
-    bpr(rc, a, p, s1, d, rt);
+// Compare registers and branch with nop in delay slot or cbcond without delay slot.
+
+// Compare integer (32 bit) values (icc only).
+void MacroAssembler::cmp_and_br_short(Register s1, Register s2, Condition c,
+                                      Predict p, Label& L) {
+  assert_not_delayed();
+  if (use_cbcond(L)) {
+    Assembler::cbcond(c, icc, s1, s2, L);
   } else {
-    tst(s1);
-    br(reg_cond_to_cc_cond(rc), a, p, d, rt);
+    cmp(s1, s2);
+    br(c, false, p, L);
+    delayed()->nop();
+  }
+}
+
+// Compare integer (32 bit) values (icc only).
+void MacroAssembler::cmp_and_br_short(Register s1, int simm13a, Condition c,
+                                      Predict p, Label& L) {
+  assert_not_delayed();
+  if (is_simm(simm13a,5) && use_cbcond(L)) {
+    Assembler::cbcond(c, icc, s1, simm13a, L);
+  } else {
+    cmp(s1, simm13a);
+    br(c, false, p, L);
+    delayed()->nop();
+  }
+}
+
+// Branch that tests xcc in LP64 and icc in !LP64
+void MacroAssembler::cmp_and_brx_short(Register s1, Register s2, Condition c,
+                                       Predict p, Label& L) {
+  assert_not_delayed();
+  if (use_cbcond(L)) {
+    Assembler::cbcond(c, ptr_cc, s1, s2, L);
+  } else {
+    cmp(s1, s2);
+    brx(c, false, p, L);
+    delayed()->nop();
   }
 }
 
-void MacroAssembler::br_on_reg_cond( RCondition rc, bool a, Predict p,
-                                     Register s1, Label& L ) {
-  if (VM_Version::v9_instructions_work()) {
-    bpr(rc, a, p, s1, L);
+// Branch that tests xcc in LP64 and icc in !LP64
+void MacroAssembler::cmp_and_brx_short(Register s1, int simm13a, Condition c,
+                                       Predict p, Label& L) {
+  assert_not_delayed();
+  if (is_simm(simm13a,5) && use_cbcond(L)) {
+    Assembler::cbcond(c, ptr_cc, s1, simm13a, L);
   } else {
-    tst(s1);
-    br(reg_cond_to_cc_cond(rc), a, p, L);
+    cmp(s1, simm13a);
+    brx(c, false, p, L);
+    delayed()->nop();
   }
 }
 
+// Short branch version for compares a pointer with zero.
+
+void MacroAssembler::br_null_short(Register s1, Predict p, Label& L) {
+  assert_not_delayed();
+  if (use_cbcond(L)) {
+    Assembler::cbcond(zero, ptr_cc, s1, 0, L);
+    return;
+  }
+  br_null(s1, false, p, L);
+  delayed()->nop();
+}
+
+void MacroAssembler::br_notnull_short(Register s1, Predict p, Label& L) {
+  assert_not_delayed();
+  if (use_cbcond(L)) {
+    Assembler::cbcond(notZero, ptr_cc, s1, 0, L);
+    return;
+  }
+  br_notnull(s1, false, p, L);
+  delayed()->nop();
+}
+
+// Unconditional short branch
+void MacroAssembler::ba_short(Label& L) {
+  if (use_cbcond(L)) {
+    Assembler::cbcond(equal, icc, G0, G0, L);
+    return;
+  }
+  br(always, false, pt, L);
+  delayed()->nop();
+}
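Each *_short helper above emits a single cbcond when use_cbcond(L) allows it and otherwise falls back to the classic cmp / br / nop-in-delay-slot triple, which is why later hunks in this change can collapse those triples into one call. The immediate forms additionally require the constant to fit cbcond's 5-bit signed immediate, per the is_simm(simm13a, 5) guard. A tiny model of that immediate check, with made-up sample values:

    #include <cstdint>
    #include <cstdio>

    // Same spirit as is_simm(x, nbits): does x fit in a signed nbits-bit field?
    static bool fits_simm(intptr_t x, int nbits) {
        intptr_t limit = intptr_t(1) << (nbits - 1);
        return -limit <= x && x < limit;
    }

    int main() {
        // cbcond's immediate is only 5 bits wide, so cmp_and_br_short(reg, 100, ...)
        // still expands to cmp + br + nop even on cbcond-capable hardware.
        for (int v : {15, 16, -16, -17, 100})
            std::printf("%4d fits simm5: %s\n", v, fits_simm(v, 5) ? "yes" : "no");
    }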
 
 // instruction sequences factored across compiler & interpreter
 
@@ -2197,11 +2275,9 @@
   // since that triplet is reached only after finding the high halves differ.
 
   if (VM_Version::v9_instructions_work()) {
-
-                                    mov  (                     -1, Rresult);
-    ba( false, done );  delayed()-> movcc(greater, false, icc,  1, Rresult);
-  }
-  else {
+    mov(-1, Rresult);
+    ba(done);  delayed()-> movcc(greater, false, icc,  1, Rresult);
+  } else {
     br(less,    true, pt, done); delayed()-> set(-1, Rresult);
     br(greater, true, pt, done); delayed()-> set( 1, Rresult);
   }
@@ -2212,9 +2288,8 @@
     mov(                               -1, Rresult);
     movcc(equal,           false, icc,  0, Rresult);
     movcc(greaterUnsigned, false, icc,  1, Rresult);
-  }
-  else {
-                                                    set(-1, Rresult);
+  } else {
+    set(-1, Rresult);
     br(equal,           true, pt, done); delayed()->set( 0, Rresult);
     br(greaterUnsigned, true, pt, done); delayed()->set( 1, Rresult);
   }
@@ -2250,11 +2325,10 @@
   // This code can be optimized to use the 64 bit shifts in V9.
   // Here we use the 32 bit shifts.
 
-  and3( Rcount,         0x3f,           Rcount);     // take least significant 6 bits
-  subcc(Rcount,         31,             Ralt_count);
+  and3( Rcount, 0x3f, Rcount);     // take least significant 6 bits
+  subcc(Rcount,   31, Ralt_count);
   br(greater, true, pn, big_shift);
-  delayed()->
-  dec(Ralt_count);
+  delayed()->dec(Ralt_count);
 
   // shift < 32 bits, Ralt_count = Rcount-31
 
@@ -2263,28 +2337,27 @@
   // more to take care of the special (rare) case where count is zero
   // (shifting by 32 would not work).
 
-  neg(  Ralt_count                                 );
+  neg(Ralt_count);
 
   // The order of the next two instructions is critical in the case where
   // Rin and Rout are the same and should not be reversed.
 
-  srl(  Rin_low,        Ralt_count,     Rxfer_bits ); // shift right by 31-count
+  srl(Rin_low, Ralt_count, Rxfer_bits); // shift right by 31-count
   if (Rcount != Rout_low) {
-    sll(        Rin_low,        Rcount,         Rout_low   ); // low half
+    sll(Rin_low, Rcount, Rout_low); // low half
   }
-  sll(  Rin_high,       Rcount,         Rout_high  );
+  sll(Rin_high, Rcount, Rout_high);
   if (Rcount == Rout_low) {
-    sll(        Rin_low,        Rcount,         Rout_low   ); // low half
+    sll(Rin_low, Rcount, Rout_low); // low half
   }
-  srl(  Rxfer_bits,     1,              Rxfer_bits ); // shift right by one more
-  ba (false, done);
-  delayed()->
-  or3(  Rout_high,      Rxfer_bits,     Rout_high);   // new hi value: or in shifted old hi part and xfer from low
+  srl(Rxfer_bits, 1, Rxfer_bits ); // shift right by one more
+  ba(done);
+  delayed()->or3(Rout_high, Rxfer_bits, Rout_high);   // new hi value: or in shifted old hi part and xfer from low
 
   // shift >= 32 bits, Ralt_count = Rcount-32
   bind(big_shift);
-  sll(  Rin_low,        Ralt_count,     Rout_high  );
-  clr(  Rout_low                                   );
+  sll(Rin_low, Ralt_count, Rout_high  );
+  clr(Rout_low);
 
   bind(done);
 }
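The reformatted lshl above still builds a 64-bit left shift out of 32-bit operations: the low half is shifted right by (31 - count) and then by one more bit, so no single shift amount ever reaches 32 (which keeps count == 0 well defined), and the result supplies the bits transferred into the high half; counts of 32..63 take the big_shift path. A standalone C++ model of the same decomposition, checked against a native 64-bit shift (the sra/srl variants in the next hunks mirror it for right shifts):

    #include <cstdint>
    #include <cstdio>

    // hi:lo are the 32-bit halves of a 64-bit value; count is masked to 0..63.
    static void shl64(uint32_t hi, uint32_t lo, unsigned count,
                      uint32_t& out_hi, uint32_t& out_lo) {
        count &= 0x3f;                              // take least significant 6 bits
        if (count >= 32) {                          // "big shift" path
            out_hi = lo << (count - 32);
            out_lo = 0;
            return;
        }
        // Right shift by (32 - count) done as (31 - count) then 1 more, so the
        // shift amount never reaches 32 even when count is zero.
        uint32_t xfer = (lo >> (31 - count)) >> 1;
        out_lo = lo << count;
        out_hi = (hi << count) | xfer;
    }

    int main() {
        uint64_t v = 0x123456789abcdef0ULL;
        for (unsigned c = 0; c < 64; c++) {
            uint32_t hi, lo;
            shl64(uint32_t(v >> 32), uint32_t(v), c, hi, lo);
            if (((uint64_t(hi) << 32) | lo) != (v << c))
                std::printf("mismatch at count %u\n", c);
        }
        std::printf("all 64 shift counts verified\n");
    }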
@@ -2313,8 +2386,8 @@
   // This code can be optimized to use the 64 bit shifts in V9.
   // Here we use the 32 bit shifts.
 
-  and3( Rcount,         0x3f,           Rcount);     // take least significant 6 bits
-  subcc(Rcount,         31,             Ralt_count);
+  and3( Rcount, 0x3f, Rcount);     // take least significant 6 bits
+  subcc(Rcount,   31, Ralt_count);
   br(greater, true, pn, big_shift);
   delayed()->dec(Ralt_count);
 
@@ -2325,29 +2398,28 @@
   // more to take care of the special (rare) case where count is zero
   // (shifting by 32 would not work).
 
-  neg(  Ralt_count                                  );
+  neg(Ralt_count);
   if (Rcount != Rout_low) {
-    srl(        Rin_low,        Rcount,         Rout_low    );
+    srl(Rin_low, Rcount, Rout_low);
   }
 
   // The order of the next two instructions is critical in the case where
   // Rin and Rout are the same and should not be reversed.
 
-  sll(  Rin_high,       Ralt_count,     Rxfer_bits  ); // shift left by 31-count
-  sra(  Rin_high,       Rcount,         Rout_high   ); // high half
-  sll(  Rxfer_bits,     1,              Rxfer_bits  ); // shift left by one more
+  sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count
+  sra(Rin_high,     Rcount, Rout_high ); // high half
+  sll(Rxfer_bits,        1, Rxfer_bits); // shift left by one more
   if (Rcount == Rout_low) {
-    srl(        Rin_low,        Rcount,         Rout_low    );
+    srl(Rin_low, Rcount, Rout_low);
   }
-  ba (false, done);
-  delayed()->
-  or3(  Rout_low,       Rxfer_bits,     Rout_low    ); // new low value: or shifted old low part and xfer from high
+  ba(done);
+  delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high
 
   // shift >= 32 bits, Ralt_count = Rcount-32
   bind(big_shift);
 
-  sra(  Rin_high,       Ralt_count,     Rout_low    );
-  sra(  Rin_high,       31,             Rout_high   ); // sign into hi
+  sra(Rin_high, Ralt_count, Rout_low);
+  sra(Rin_high,         31, Rout_high); // sign into hi
 
   bind( done );
 }
@@ -2377,8 +2449,8 @@
   // This code can be optimized to use the 64 bit shifts in V9.
   // Here we use the 32 bit shifts.
 
-  and3( Rcount,         0x3f,           Rcount);     // take least significant 6 bits
-  subcc(Rcount,         31,             Ralt_count);
+  and3( Rcount, 0x3f, Rcount);     // take least significant 6 bits
+  subcc(Rcount,   31, Ralt_count);
   br(greater, true, pn, big_shift);
   delayed()->dec(Ralt_count);
 
@@ -2389,29 +2461,28 @@
   // more to take care of the special (rare) case where count is zero
   // (shifting by 32 would not work).
 
-  neg(  Ralt_count                                  );
+  neg(Ralt_count);
   if (Rcount != Rout_low) {
-    srl(        Rin_low,        Rcount,         Rout_low    );
+    srl(Rin_low, Rcount, Rout_low);
   }
 
   // The order of the next two instructions is critical in the case where
   // Rin and Rout are the same and should not be reversed.
 
-  sll(  Rin_high,       Ralt_count,     Rxfer_bits  ); // shift left by 31-count
-  srl(  Rin_high,       Rcount,         Rout_high   ); // high half
-  sll(  Rxfer_bits,     1,              Rxfer_bits  ); // shift left by one more
+  sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count
+  srl(Rin_high,     Rcount, Rout_high ); // high half
+  sll(Rxfer_bits,        1, Rxfer_bits); // shift left by one more
   if (Rcount == Rout_low) {
-    srl(        Rin_low,        Rcount,         Rout_low    );
+    srl(Rin_low, Rcount, Rout_low);
   }
-  ba (false, done);
-  delayed()->
-  or3(  Rout_low,       Rxfer_bits,     Rout_low    ); // new low value: or shifted old low part and xfer from high
+  ba(done);
+  delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high
 
   // shift >= 32 bits, Ralt_count = Rcount-32
   bind(big_shift);
 
-  srl(  Rin_high,       Ralt_count,     Rout_low    );
-  clr(  Rout_high                                   );
+  srl(Rin_high, Ralt_count, Rout_low);
+  clr(Rout_high);
 
   bind( done );
 }
@@ -2419,7 +2490,7 @@
 #ifdef _LP64
 void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) {
   cmp(Ra, Rb);
-  mov(                       -1, Rresult);
+  mov(-1, Rresult);
   movcc(equal,   false, xcc,  0, Rresult);
   movcc(greater, false, xcc,  1, Rresult);
 }
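lcmp produces the canonical -1 / 0 / 1 three-way compare result without branching: the result register is seeded with -1 and two conditional moves then overwrite it on equal and on greater. The same semantics in plain C++ (the emitted code uses movcc, so no branch is taken at run time):

    #include <cstdint>
    #include <cstdio>

    // Seed with -1, conditionally overwrite with 0 on equal and 1 on greater,
    // mirroring the mov / movcc / movcc sequence above.
    static int lcmp_model(int64_t a, int64_t b) {
        int r = -1;
        if (a == b) r = 0;   // movcc(equal,   false, xcc, 0, Rresult)
        if (a >  b) r = 1;   // movcc(greater, false, xcc, 1, Rresult)
        return r;
    }

    int main() {
        std::printf("%d %d %d\n", lcmp_model(1, 2), lcmp_model(2, 2), lcmp_model(3, 2));
    }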
@@ -2459,14 +2530,14 @@
 
   if (VM_Version::v9_instructions_work()) {
 
-    mov(                   -1, Rresult );
-    movcc( eq, true, fcc0,  0, Rresult );
-    movcc( gt, true, fcc0,  1, Rresult );
+    mov(-1, Rresult);
+    movcc(eq, true, fcc0, 0, Rresult);
+    movcc(gt, true, fcc0, 1, Rresult);
 
   } else {
     Label done;
 
-                                         set( -1, Rresult );
+    set( -1, Rresult );
     //fb(lt, true, pn, done); delayed()->set( -1, Rresult );
     fb( eq, true, pn, done);  delayed()->set(  0, Rresult );
     fb( gt, true, pn, done);  delayed()->set(  1, Rresult );
@@ -2668,9 +2739,7 @@
     set(StubRoutines::Sparc::locked, lock_reg);
 
     bind(retry_get_lock);
-    cmp(yield_reg, V8AtomicOperationUnderLockSpinCount);
-    br(Assembler::less, false, Assembler::pt, dont_yield);
-    delayed()->nop();
+    cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dont_yield);
 
     if(use_call_vm) {
       Untested("Need to verify global reg consistency");
@@ -2700,9 +2769,7 @@
 
     // yes, got lock.  do we have the same top?
     ld(top_ptr_reg_after_save, 0, value_reg);
-    cmp(value_reg, top_reg_after_save);
-    br(Assembler::notEqual, false, Assembler::pn, not_same);
-    delayed()->nop();
+    cmp_and_br_short(value_reg, top_reg_after_save, Assembler::notEqual, Assembler::pn, not_same);
 
     // yes, same top.
     st(ptr_reg_after_save, top_ptr_reg_after_save, 0);
@@ -2952,8 +3019,7 @@
 
   // on success:
   restore();
-  ba(false, L_success);
-  delayed()->nop();
+  ba_short(L_success);
 
   // on failure:
   bind(L_pop_to_failure);
@@ -2969,8 +3035,7 @@
                                                    Label* L_success,
                                                    Label* L_failure,
                                                    Label* L_slow_path,
-                                        RegisterOrConstant super_check_offset,
-                                        Register instanceof_hack) {
+                                        RegisterOrConstant super_check_offset) {
   int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
                    Klass::secondary_super_cache_offset_in_bytes());
   int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
@@ -2993,29 +3058,10 @@
   if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
   if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
   if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
-  assert(label_nulls <= 1 || instanceof_hack != noreg ||
+  assert(label_nulls <= 1 ||
          (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path),
          "at most one NULL in the batch, usually");
 
-  // Support for the instanceof hack, which uses delay slots to
-  // set a destination register to zero or one.
-  bool do_bool_sets = (instanceof_hack != noreg);
-#define BOOL_SET(bool_value)                            \
-  if (do_bool_sets && bool_value >= 0)                  \
-    set(bool_value, instanceof_hack)
-#define DELAYED_BOOL_SET(bool_value)                    \
-  if (do_bool_sets && bool_value >= 0)                  \
-    delayed()->set(bool_value, instanceof_hack);        \
-  else delayed()->nop()
-  // Hacked ba(), which may only be used just before L_fallthrough.
-#define FINAL_JUMP(label, bool_value)                   \
-  if (&(label) == &L_fallthrough) {                     \
-    BOOL_SET(bool_value);                               \
-  } else {                                              \
-    ba((do_bool_sets && bool_value >= 0), label);       \
-    DELAYED_BOOL_SET(bool_value);                       \
-  }
-
   // If the pointers are equal, we are done (e.g., String[] elements).
   // This self-check enables sharing of secondary supertype arrays among
   // non-primary types such as array-of-interface.  Otherwise, each such
@@ -3024,8 +3070,8 @@
   // type checks are in fact trivially successful in this manner,
   // so we get a nicely predicted branch right at the start of the check.
   cmp(super_klass, sub_klass);
-  brx(Assembler::equal, do_bool_sets, Assembler::pn, *L_success);
-  DELAYED_BOOL_SET(1);
+  brx(Assembler::equal, false, Assembler::pn, *L_success);
+  delayed()->nop();
 
   // Check the supertype display:
   if (must_load_sco) {
@@ -3049,50 +3095,49 @@
   // So if it was a primary super, we can just fail immediately.
   // Otherwise, it's the slow path for us (no success at this point).
 
+  // Hacked ba(), which may only be used just before L_fallthrough.
+#define FINAL_JUMP(label)            \
+  if (&(label) != &L_fallthrough) {  \
+    ba(label);  delayed()->nop();    \
+  }
+
   if (super_check_offset.is_register()) {
-    brx(Assembler::equal, do_bool_sets, Assembler::pn, *L_success);
-    delayed(); if (do_bool_sets)  BOOL_SET(1);
-    // if !do_bool_sets, sneak the next cmp into the delay slot:
-    cmp(super_check_offset.as_register(), sc_offset);
+    brx(Assembler::equal, false, Assembler::pn, *L_success);
+    delayed()->cmp(super_check_offset.as_register(), sc_offset);
 
     if (L_failure == &L_fallthrough) {
-      brx(Assembler::equal, do_bool_sets, Assembler::pt, *L_slow_path);
+      brx(Assembler::equal, false, Assembler::pt, *L_slow_path);
       delayed()->nop();
-      BOOL_SET(0);  // fallthrough on failure
     } else {
-      brx(Assembler::notEqual, do_bool_sets, Assembler::pn, *L_failure);
-      DELAYED_BOOL_SET(0);
-      FINAL_JUMP(*L_slow_path, -1);  // -1 => vanilla delay slot
+      brx(Assembler::notEqual, false, Assembler::pn, *L_failure);
+      delayed()->nop();
+      FINAL_JUMP(*L_slow_path);
     }
   } else if (super_check_offset.as_constant() == sc_offset) {
     // Need a slow path; fast failure is impossible.
     if (L_slow_path == &L_fallthrough) {
-      brx(Assembler::equal, do_bool_sets, Assembler::pt, *L_success);
-      DELAYED_BOOL_SET(1);
+      brx(Assembler::equal, false, Assembler::pt, *L_success);
+      delayed()->nop();
     } else {
       brx(Assembler::notEqual, false, Assembler::pn, *L_slow_path);
       delayed()->nop();
-      FINAL_JUMP(*L_success, 1);
+      FINAL_JUMP(*L_success);
     }
   } else {
     // No slow path; it's a fast decision.
     if (L_failure == &L_fallthrough) {
-      brx(Assembler::equal, do_bool_sets, Assembler::pt, *L_success);
-      DELAYED_BOOL_SET(1);
-      BOOL_SET(0);
+      brx(Assembler::equal, false, Assembler::pt, *L_success);
+      delayed()->nop();
     } else {
-      brx(Assembler::notEqual, do_bool_sets, Assembler::pn, *L_failure);
-      DELAYED_BOOL_SET(0);
-      FINAL_JUMP(*L_success, 1);
+      brx(Assembler::notEqual, false, Assembler::pn, *L_failure);
+      delayed()->nop();
+      FINAL_JUMP(*L_success);
     }
   }
 
   bind(L_fallthrough);
 
-#undef final_jump
-#undef bool_set
-#undef DELAYED_BOOL_SET
-#undef final_jump
+#undef FINAL_JUMP
 }
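The rewritten check_klass_subtype_fast_path drops the instanceof_hack delay-slot machinery but keeps the same decision tree: identical klasses succeed at once, a hit in the supertype display succeeds, and a miss either fails outright or drops to the slow path depending on whether super_check_offset designates the secondary-super cache. A rough C++ model of that branch structure only -- the field names and layout here are illustrative, not HotSpot's actual klassOop layout:

    #include <cstdio>

    enum Outcome { SUCCESS, FAILURE, SLOW_PATH };

    // Illustrative stand-ins for the words the emitted code reads.
    struct Klass {
        const Klass* display_slot;        // what loading sub at super->super_check_offset yields
        bool         offset_is_sc_cache;  // super_check_offset == secondary_super_cache?
    };

    static Outcome fast_path(const Klass* sub, const Klass* super) {
        if (sub == super)               return SUCCESS;   // self-check, predicted taken
        if (sub->display_slot == super) return SUCCESS;   // supertype display hit
        if (!super->offset_is_sc_cache) return FAILURE;   // primary-super miss is final
        return SLOW_PATH;                                  // must scan the secondary supers
    }

    int main() {
        Klass object{nullptr, false};
        Klass string{&object, false};    // toy example: String's display entry holds Object
        std::printf("String <: Object -> %d\n", fast_path(&string, &object));  // SUCCESS
        std::printf("Object <: String -> %d\n", fast_path(&object, &string));  // FAILURE
    }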
 
 
@@ -3185,7 +3230,7 @@
   st_ptr(super_klass, sub_klass, sc_offset);
 
   if (L_success != &L_fallthrough) {
-    ba(false, *L_success);
+    ba(*L_success);
     delayed()->nop();
   }
 
@@ -3200,9 +3245,7 @@
   // compare method type against that of the receiver
   RegisterOrConstant mhtype_offset = delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg);
   load_heap_oop(mh_reg, mhtype_offset, temp_reg);
-  cmp(temp_reg, mtype_reg);
-  br(Assembler::notEqual, false, Assembler::pn, wrong_method_type);
-  delayed()->nop();
+  cmp_and_brx_short(temp_reg, mtype_reg, Assembler::notEqual, Assembler::pn, wrong_method_type);
 }
 
 
@@ -3214,15 +3257,10 @@
                                                 Register temp_reg) {
   assert_different_registers(vmslots_reg, mh_reg, temp_reg);
   // load mh.type.form.vmslots
-  if (java_lang_invoke_MethodHandle::vmslots_offset_in_bytes() != 0) {
-    // hoist vmslots into every mh to avoid dependent load chain
-    ld(           Address(mh_reg,    delayed_value(java_lang_invoke_MethodHandle::vmslots_offset_in_bytes, temp_reg)),   vmslots_reg);
-  } else {
-    Register temp2_reg = vmslots_reg;
-    load_heap_oop(Address(mh_reg,    delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg)),      temp2_reg);
-    load_heap_oop(Address(temp2_reg, delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, temp_reg)),        temp2_reg);
-    ld(           Address(temp2_reg, delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
-  }
+  Register temp2_reg = vmslots_reg;
+  load_heap_oop(Address(mh_reg,    delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg)),      temp2_reg);
+  load_heap_oop(Address(temp2_reg, delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, temp_reg)),        temp2_reg);
+  ld(           Address(temp2_reg, delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
 }
 
 
@@ -3295,9 +3333,7 @@
   // pointers to allow age to be placed into low bits
   assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
   and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
-  cmp(temp_reg, markOopDesc::biased_lock_pattern);
-  brx(Assembler::notEqual, false, Assembler::pn, cas_label);
-  delayed()->nop();
+  cmp_and_brx_short(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label);
 
   load_klass(obj_reg, temp_reg);
   ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
@@ -3364,8 +3400,7 @@
     brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
     delayed()->nop();
   }
-  br(Assembler::always, false, Assembler::pt, done);
-  delayed()->nop();
+  ba_short(done);
 
   bind(try_rebias);
   // At this point we know the epoch has expired, meaning that the
@@ -3393,8 +3428,7 @@
     brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
     delayed()->nop();
   }
-  br(Assembler::always, false, Assembler::pt, done);
-  delayed()->nop();
+  ba_short(done);
 
   bind(try_revoke_bias);
   // The prototype mark in the klass doesn't have the bias bit set any
@@ -3445,7 +3479,7 @@
 // Solaris/SPARC's "as".  Another apt name would be cas_ptr()
 
 void MacroAssembler::casn (Register addr_reg, Register cmp_reg, Register set_reg ) {
-  casx_under_lock (addr_reg, cmp_reg, set_reg, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()) ;
+  casx_under_lock (addr_reg, cmp_reg, set_reg, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
 }
 
 
@@ -3486,9 +3520,9 @@
    }
 
    if (EmitSync & 1) {
-     mov    (3, Rscratch) ;
-     st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
-     cmp    (SP, G0) ;
+     mov(3, Rscratch);
+     st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
+     cmp(SP, G0);
      return ;
    }
 
@@ -3529,7 +3563,7 @@
      assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
      andcc(Rscratch, 0xfffff003, Rscratch);
      st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
-     bind (done) ;
+     bind (done);
      return ;
    }
 
@@ -3538,7 +3572,7 @@
    if (EmitSync & 256) {
       Label IsInflated ;
 
-      ld_ptr (mark_addr, Rmark);           // fetch obj->mark
+      ld_ptr(mark_addr, Rmark);           // fetch obj->mark
       // Triage: biased, stack-locked, neutral, inflated
       if (try_bias) {
         biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
@@ -3549,49 +3583,49 @@
       // Store mark into displaced mark field in the on-stack basic-lock "box"
       // Critically, this must happen before the CAS
       // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty.
-      st_ptr (Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
-      andcc  (Rmark, 2, G0) ;
-      brx    (Assembler::notZero, false, Assembler::pn, IsInflated) ;
-      delayed() ->
+      st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
+      andcc(Rmark, 2, G0);
+      brx(Assembler::notZero, false, Assembler::pn, IsInflated);
+      delayed()->
 
       // Try stack-lock acquisition.
       // Beware: the 1st instruction is in a delay slot
-      mov    (Rbox,  Rscratch);
-      or3    (Rmark, markOopDesc::unlocked_value, Rmark);
-      assert (mark_addr.disp() == 0, "cas must take a zero displacement");
-      casn   (mark_addr.base(), Rmark, Rscratch) ;
-      cmp    (Rmark, Rscratch);
-      brx    (Assembler::equal, false, Assembler::pt, done);
+      mov(Rbox,  Rscratch);
+      or3(Rmark, markOopDesc::unlocked_value, Rmark);
+      assert(mark_addr.disp() == 0, "cas must take a zero displacement");
+      casn(mark_addr.base(), Rmark, Rscratch);
+      cmp(Rmark, Rscratch);
+      brx(Assembler::equal, false, Assembler::pt, done);
       delayed()->sub(Rscratch, SP, Rscratch);
 
       // Stack-lock attempt failed - check for recursive stack-lock.
       // See the comments below about how we might remove this case.
 #ifdef _LP64
-      sub    (Rscratch, STACK_BIAS, Rscratch);
+      sub(Rscratch, STACK_BIAS, Rscratch);
 #endif
       assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
-      andcc  (Rscratch, 0xfffff003, Rscratch);
-      br     (Assembler::always, false, Assembler::pt, done) ;
-      delayed()-> st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
-
-      bind   (IsInflated) ;
+      andcc(Rscratch, 0xfffff003, Rscratch);
+      br(Assembler::always, false, Assembler::pt, done);
+      delayed()-> st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
+
+      bind(IsInflated);
       if (EmitSync & 64) {
          // If m->owner != null goto IsLocked
          // Pessimistic form: Test-and-CAS vs CAS
          // The optimistic form avoids RTS->RTO cache line upgrades.
-         ld_ptr (Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
-         andcc  (Rscratch, Rscratch, G0) ;
-         brx    (Assembler::notZero, false, Assembler::pn, done) ;
-         delayed()->nop() ;
+         ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
+         andcc(Rscratch, Rscratch, G0);
+         brx(Assembler::notZero, false, Assembler::pn, done);
+         delayed()->nop();
          // m->owner == null : it's unlocked.
       }
 
       // Try to CAS m->owner from null to Self
       // Invariant: if we acquire the lock then _recursions should be 0.
-      add    (Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark) ;
-      mov    (G2_thread, Rscratch) ;
-      casn   (Rmark, G0, Rscratch) ;
-      cmp    (Rscratch, G0) ;
+      add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
+      mov(G2_thread, Rscratch);
+      casn(Rmark, G0, Rscratch);
+      cmp(Rscratch, G0);
       // Intentional fall-through into done
    } else {
       // Aggressively avoid the Store-before-CAS penalty
@@ -3599,9 +3633,9 @@
       Label IsInflated, Recursive ;
 
 // Anticipate CAS -- Avoid RTS->RTO upgrade
-// prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads) ;
-
-      ld_ptr (mark_addr, Rmark);           // fetch obj->mark
+// prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);
+
+      ld_ptr(mark_addr, Rmark);           // fetch obj->mark
       // Triage: biased, stack-locked, neutral, inflated
 
       if (try_bias) {
@@ -3609,8 +3643,8 @@
         // Invariant: if control reaches this point in the emitted stream
         // then Rmark has not been modified.
       }
-      andcc  (Rmark, 2, G0) ;
-      brx    (Assembler::notZero, false, Assembler::pn, IsInflated) ;
+      andcc(Rmark, 2, G0);
+      brx(Assembler::notZero, false, Assembler::pn, IsInflated);
       delayed()->                         // Beware - dangling delay-slot
 
       // Try stack-lock acquisition.
@@ -3620,23 +3654,21 @@
       //   ST obj->mark = box    -- overwrite transient 0 value
       // This presumes TSO, of course.
 
-      mov    (0, Rscratch) ;
-      or3    (Rmark, markOopDesc::unlocked_value, Rmark);
-      assert (mark_addr.disp() == 0, "cas must take a zero displacement");
-      casn   (mark_addr.base(), Rmark, Rscratch) ;
-// prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads) ;
-      cmp    (Rscratch, Rmark) ;
-      brx    (Assembler::notZero, false, Assembler::pn, Recursive) ;
-      delayed() ->
-        st_ptr (Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
+      mov(0, Rscratch);
+      or3(Rmark, markOopDesc::unlocked_value, Rmark);
+      assert(mark_addr.disp() == 0, "cas must take a zero displacement");
+      casn(mark_addr.base(), Rmark, Rscratch);
+// prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);
+      cmp(Rscratch, Rmark);
+      brx(Assembler::notZero, false, Assembler::pn, Recursive);
+      delayed()->st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
       if (counters != NULL) {
         cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
       }
-      br     (Assembler::always, false, Assembler::pt, done);
-      delayed() ->
-        st_ptr (Rbox, mark_addr) ;
-
-      bind   (Recursive) ;
+      ba(done);
+      delayed()->st_ptr(Rbox, mark_addr);
+
+      bind(Recursive);
       // Stack-lock attempt failed - check for recursive stack-lock.
       // Tests show that we can remove the recursive case with no impact
       // on refworkload 0.83.  If we need to reduce the size of the code
@@ -3653,49 +3685,48 @@
 
       // RScratch contains the fetched obj->mark value from the failed CASN.
 #ifdef _LP64
-      sub    (Rscratch, STACK_BIAS, Rscratch);
+      sub(Rscratch, STACK_BIAS, Rscratch);
 #endif
       sub(Rscratch, SP, Rscratch);
       assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
-      andcc  (Rscratch, 0xfffff003, Rscratch);
+      andcc(Rscratch, 0xfffff003, Rscratch);
       if (counters != NULL) {
         // Accounting needs the Rscratch register
-        st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
+        st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
         cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
-        br     (Assembler::always, false, Assembler::pt, done) ;
-        delayed()->nop() ;
+        ba_short(done);
       } else {
-        br     (Assembler::always, false, Assembler::pt, done) ;
-        delayed()-> st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
+        ba(done);
+        delayed()->st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
       }
 
-      bind   (IsInflated) ;
+      bind   (IsInflated);
       if (EmitSync & 64) {
          // If m->owner != null goto IsLocked
          // Test-and-CAS vs CAS
          // Pessimistic form avoids futile (doomed) CAS attempts
          // The optimistic form avoids RTS->RTO cache line upgrades.
-         ld_ptr (Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
-         andcc  (Rscratch, Rscratch, G0) ;
-         brx    (Assembler::notZero, false, Assembler::pn, done) ;
-         delayed()->nop() ;
+         ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
+         andcc(Rscratch, Rscratch, G0);
+         brx(Assembler::notZero, false, Assembler::pn, done);
+         delayed()->nop();
          // m->owner == null : it's unlocked.
       }
 
       // Try to CAS m->owner from null to Self
       // Invariant: if we acquire the lock then _recursions should be 0.
-      add    (Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark) ;
-      mov    (G2_thread, Rscratch) ;
-      casn   (Rmark, G0, Rscratch) ;
-      cmp    (Rscratch, G0) ;
+      add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
+      mov(G2_thread, Rscratch);
+      casn(Rmark, G0, Rscratch);
+      cmp(Rscratch, G0);
       // ST box->displaced_header = NonZero.
       // Any non-zero value suffices:
       //    unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
-      st_ptr (Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
+      st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
       // Intentional fall-through into done
    }
 
-   bind   (done) ;
+   bind   (done);
 }
 
 void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
@@ -3706,7 +3737,7 @@
    Label done ;
 
    if (EmitSync & 4) {
-     cmp  (SP, G0) ;
+     cmp(SP, G0);
      return ;
    }
 
@@ -3717,18 +3748,16 @@
 
      // Test first if it is a fast recursive unlock
      ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark);
-     cmp(Rmark, G0);
-     brx(Assembler::equal, false, Assembler::pt, done);
-     delayed()->nop();
+     br_null_short(Rmark, Assembler::pt, done);
 
      // Check if it is still a light weight lock, this is true if we see
      // the stack address of the basicLock in the markOop of the object
      assert(mark_addr.disp() == 0, "cas must take a zero displacement");
      casx_under_lock(mark_addr.base(), Rbox, Rmark,
        (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
-     br (Assembler::always, false, Assembler::pt, done);
+     ba(done);
      delayed()->cmp(Rbox, Rmark);
-     bind (done) ;
+     bind(done);
      return ;
    }
 
@@ -3743,14 +3772,14 @@
       biased_locking_exit(mark_addr, Rscratch, done);
    }
 
-   ld_ptr (Roop, oopDesc::mark_offset_in_bytes(), Rmark) ;
-   ld_ptr (Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch);
-   andcc  (Rscratch, Rscratch, G0);
-   brx    (Assembler::zero, false, Assembler::pn, done);
-   delayed()-> nop() ;      // consider: relocate fetch of mark, above, into this DS
-   andcc  (Rmark, 2, G0) ;
-   brx    (Assembler::zero, false, Assembler::pt, LStacked) ;
-   delayed()-> nop() ;
+   ld_ptr(Roop, oopDesc::mark_offset_in_bytes(), Rmark);
+   ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch);
+   andcc(Rscratch, Rscratch, G0);
+   brx(Assembler::zero, false, Assembler::pn, done);
+   delayed()->nop();      // consider: relocate fetch of mark, above, into this DS
+   andcc(Rmark, 2, G0);
+   brx(Assembler::zero, false, Assembler::pt, LStacked);
+   delayed()->nop();
 
    // It's inflated
    // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
@@ -3761,48 +3790,45 @@
    // Note that we use 1-0 locking by default for the inflated case.  We
    // close the resultant (and rare) race by having contended threads in
    // monitorenter periodically poll _owner.
-   ld_ptr (Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
-   ld_ptr (Rmark, ObjectMonitor::recursions_offset_in_bytes() - 2, Rbox);
-   xor3   (Rscratch, G2_thread, Rscratch) ;
-   orcc   (Rbox, Rscratch, Rbox) ;
-   brx    (Assembler::notZero, false, Assembler::pn, done) ;
+   ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
+   ld_ptr(Rmark, ObjectMonitor::recursions_offset_in_bytes() - 2, Rbox);
+   xor3(Rscratch, G2_thread, Rscratch);
+   orcc(Rbox, Rscratch, Rbox);
+   brx(Assembler::notZero, false, Assembler::pn, done);
    delayed()->
-   ld_ptr (Rmark, ObjectMonitor::EntryList_offset_in_bytes() - 2, Rscratch);
-   ld_ptr (Rmark, ObjectMonitor::cxq_offset_in_bytes() - 2, Rbox);
-   orcc   (Rbox, Rscratch, G0) ;
+   ld_ptr(Rmark, ObjectMonitor::EntryList_offset_in_bytes() - 2, Rscratch);
+   ld_ptr(Rmark, ObjectMonitor::cxq_offset_in_bytes() - 2, Rbox);
+   orcc(Rbox, Rscratch, G0);
    if (EmitSync & 65536) {
       Label LSucc ;
-      brx    (Assembler::notZero, false, Assembler::pn, LSucc) ;
-      delayed()->nop() ;
-      br     (Assembler::always, false, Assembler::pt, done) ;
-      delayed()->
-      st_ptr (G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
-
-      bind   (LSucc) ;
-      st_ptr (G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
-      if (os::is_MP()) { membar (StoreLoad) ; }
-      ld_ptr (Rmark, ObjectMonitor::succ_offset_in_bytes() - 2, Rscratch);
-      andcc  (Rscratch, Rscratch, G0) ;
-      brx    (Assembler::notZero, false, Assembler::pt, done) ;
-      delayed()-> andcc (G0, G0, G0) ;
-      add    (Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark) ;
-      mov    (G2_thread, Rscratch) ;
-      casn   (Rmark, G0, Rscratch) ;
-      cmp    (Rscratch, G0) ;
+      brx(Assembler::notZero, false, Assembler::pn, LSucc);
+      delayed()->nop();
+      ba(done);
+      delayed()->st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
+
+      bind(LSucc);
+      st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
+      if (os::is_MP()) { membar (StoreLoad); }
+      ld_ptr(Rmark, ObjectMonitor::succ_offset_in_bytes() - 2, Rscratch);
+      andcc(Rscratch, Rscratch, G0);
+      brx(Assembler::notZero, false, Assembler::pt, done);
+      delayed()->andcc(G0, G0, G0);
+      add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
+      mov(G2_thread, Rscratch);
+      casn(Rmark, G0, Rscratch);
       // invert icc.zf and goto done
-      brx    (Assembler::notZero, false, Assembler::pt, done) ;
-      delayed() -> cmp (G0, G0) ;
-      br     (Assembler::always, false, Assembler::pt, done);
-      delayed() -> cmp (G0, 1) ;
+      br_notnull(Rscratch, false, Assembler::pt, done);
+      delayed()->cmp(G0, G0);
+      ba(done);
+      delayed()->cmp(G0, 1);
    } else {
-      brx    (Assembler::notZero, false, Assembler::pn, done) ;
-      delayed()->nop() ;
-      br     (Assembler::always, false, Assembler::pt, done) ;
-      delayed()->
-      st_ptr (G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
+      brx(Assembler::notZero, false, Assembler::pn, done);
+      delayed()->nop();
+      ba(done);
+      delayed()->st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
    }
 
-   bind   (LStacked) ;
+   bind   (LStacked);
    // Consider: we could replace the expensive CAS in the exit
    // path with a simple ST of the displaced mark value fetched from
    // the on-stack basiclock box.  That admits a race where a thread T2
@@ -3831,11 +3857,11 @@
    // A prototype implementation showed excellent results, although
    // the scavenger and timeout code was rather involved.
 
-   casn   (mark_addr.base(), Rbox, Rscratch) ;
-   cmp    (Rbox, Rscratch);
+   casn(mark_addr.base(), Rbox, Rscratch);
+   cmp(Rbox, Rscratch);
    // Intentional fall through into done ...
 
-   bind   (done) ;
+   bind(done);
 }
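
For readers following the inflated exit path above, the control flow reduces to the sketch below. This is an illustrative C++ rendering of what the emitted SPARC code does, not HotSpot code: the struct fields stand in for ObjectMonitor's _owner, _recursions, _EntryList, _cxq and _succ, and the fence stands in for the StoreLoad membar.

    #include <atomic>
    #include <cstdint>

    struct MonitorSketch {
      std::atomic<void*> owner;       // owning thread, NULL when unlocked
      intptr_t           recursions;  // recursive acquisition count
      void*              entry_list;  // blocked threads already queued
      void*              cxq;         // recently arrived blocked threads
      std::atomic<void*> succ;        // heir presumptive
    };

    // Returns true when the exit completed without needing the slow path.
    bool inflated_exit_sketch(MonitorSketch* m, void* self) {
      if (m->owner.load(std::memory_order_relaxed) != self || m->recursions != 0)
        return false;                                        // not a simple exit
      if (m->entry_list == nullptr && m->cxq == nullptr) {
        m->owner.store(nullptr, std::memory_order_release);  // 1-0 exit: plain store, no CAS
        return true;                                         // contended enterers poll owner
      }
      m->owner.store(nullptr, std::memory_order_release);    // drop the lock first
      std::atomic_thread_fence(std::memory_order_seq_cst);   // the StoreLoad membar
      if (m->succ.load(std::memory_order_relaxed) != nullptr)
        return true;                                         // a successor will make progress
      // No successor: try to re-take the lock so the slow path can wake a waiter.
      void* expected = nullptr;
      bool retook = m->owner.compare_exchange_strong(expected, self);
      return !retook;   // if the CAS failed, someone else owns it and will exit later
    }
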
 
 
@@ -3891,9 +3917,7 @@
     ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1);
     ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2);
     or3(t1, t2, t3);
-    cmp(t1, t2);
-    br(Assembler::greaterEqual, false, Assembler::pn, next);
-    delayed()->nop();
+    cmp_and_br_short(t1, t2, Assembler::greaterEqual, Assembler::pn, next);
     stop("assert(top >= start)");
     should_not_reach_here();
 
@@ -3901,17 +3925,13 @@
     ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1);
     ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2);
     or3(t3, t2, t3);
-    cmp(t1, t2);
-    br(Assembler::lessEqual, false, Assembler::pn, next2);
-    delayed()->nop();
+    cmp_and_br_short(t1, t2, Assembler::lessEqual, Assembler::pn, next2);
     stop("assert(top <= end)");
     should_not_reach_here();
 
     bind(next2);
     and3(t3, MinObjAlignmentInBytesMask, t3);
-    cmp(t3, 0);
-    br(Assembler::lessEqual, false, Assembler::pn, ok);
-    delayed()->nop();
+    cmp_and_br_short(t3, 0, Assembler::lessEqual, Assembler::pn, ok);
     stop("assert(aligned)");
     should_not_reach_here();
 
@@ -3937,8 +3957,7 @@
 
   if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
     // No allocation in the shared eden.
-    br(Assembler::always, false, Assembler::pt, slow_case);
-    delayed()->nop();
+    ba_short(slow_case);
   } else {
     // get eden boundaries
     // note: we need both top & top_addr!
@@ -4072,8 +4091,7 @@
 
   if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
     // No allocation in the shared eden.
-    br(Assembler::always, false, Assembler::pt, slow_case);
-    delayed()->nop();
+    ba_short(slow_case);
   }
 
   ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top);
@@ -4098,8 +4116,7 @@
     add(t2, 1, t2);
     stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()));
   }
-  br(Assembler::always, false, Assembler::pt, try_eden);
-  delayed()->nop();
+  ba_short(try_eden);
 
   bind(discard_tlab);
   if (TLABStats) {
@@ -4115,8 +4132,7 @@
 
   // if tlab is currently allocated (top or end != null) then
   // fill [top, end + alignment_reserve) with array object
-  br_null(top, false, Assembler::pn, do_refill);
-  delayed()->nop();
+  br_null_short(top, Assembler::pn, do_refill);
 
   set((intptr_t)markOopDesc::prototype()->copy_set_hash(0x2), t2);
   st_ptr(t2, top, oopDesc::mark_offset_in_bytes()); // set up the mark word
@@ -4151,9 +4167,7 @@
     Label ok;
     ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2);
     sll_ptr(t2, LogHeapWordSize, t2);
-    cmp(t1, t2);
-    br(Assembler::equal, false, Assembler::pt, ok);
-    delayed()->nop();
+    cmp_and_br_short(t1, t2, Assembler::equal, Assembler::pt, ok);
     stop("assert(t1 == tlab_size)");
     should_not_reach_here();
 
@@ -4164,8 +4178,7 @@
   sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top);
   st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset()));
   verify_tlab();
-  br(Assembler::always, false, Assembler::pt, retry);
-  delayed()->nop();
+  ba_short(retry);
 }
 
 void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes,
@@ -4290,77 +4303,89 @@
   BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize);
   CodeBuffer buf(bb);
   MacroAssembler masm(&buf);
-  address start = masm.pc();
+
+#define __ masm.
+
+  address start = __ pc();
   Register pre_val;
 
   Label refill, restart;
   if (with_frame) {
-    masm.save_frame(0);
+    __ save_frame(0);
     pre_val = I0;  // Was O0 before the save.
   } else {
     pre_val = O0;
   }
+
   int satb_q_index_byte_offset =
     in_bytes(JavaThread::satb_mark_queue_offset() +
              PtrQueue::byte_offset_of_index());
+
   int satb_q_buf_byte_offset =
     in_bytes(JavaThread::satb_mark_queue_offset() +
              PtrQueue::byte_offset_of_buf());
+
   assert(in_bytes(PtrQueue::byte_width_of_index()) == sizeof(intptr_t) &&
          in_bytes(PtrQueue::byte_width_of_buf()) == sizeof(intptr_t),
          "check sizes in assembly below");
 
-  masm.bind(restart);
-  masm.ld_ptr(G2_thread, satb_q_index_byte_offset, L0);
-
-  masm.br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pn, L0, refill);
-  // If the branch is taken, no harm in executing this in the delay slot.
-  masm.delayed()->ld_ptr(G2_thread, satb_q_buf_byte_offset, L1);
-  masm.sub(L0, oopSize, L0);
-
-  masm.st_ptr(pre_val, L1, L0);  // [_buf + index] := I0
+  __ bind(restart);
+
+  // Load the index into the SATB buffer. PtrQueue::_index is a size_t
+  // so ld_ptr is appropriate.
+  __ ld_ptr(G2_thread, satb_q_index_byte_offset, L0);
+
+  // index == 0?
+  __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill);
+
+  __ ld_ptr(G2_thread, satb_q_buf_byte_offset, L1);
+  __ sub(L0, oopSize, L0);
+
+  __ st_ptr(pre_val, L1, L0);  // [_buf + index] := I0
   if (!with_frame) {
     // Use return-from-leaf
-    masm.retl();
-    masm.delayed()->st_ptr(L0, G2_thread, satb_q_index_byte_offset);
+    __ retl();
+    __ delayed()->st_ptr(L0, G2_thread, satb_q_index_byte_offset);
   } else {
     // Not delayed.
-    masm.st_ptr(L0, G2_thread, satb_q_index_byte_offset);
+    __ st_ptr(L0, G2_thread, satb_q_index_byte_offset);
   }
   if (with_frame) {
-    masm.ret();
-    masm.delayed()->restore();
+    __ ret();
+    __ delayed()->restore();
   }
-  masm.bind(refill);
+  __ bind(refill);
 
   address handle_zero =
     CAST_FROM_FN_PTR(address,
                      &SATBMarkQueueSet::handle_zero_index_for_thread);
   // This should be rare enough that we can afford to save all the
   // scratch registers that the calling context might be using.
-  masm.mov(G1_scratch, L0);
-  masm.mov(G3_scratch, L1);
-  masm.mov(G4, L2);
+  __ mov(G1_scratch, L0);
+  __ mov(G3_scratch, L1);
+  __ mov(G4, L2);
   // We need the value of O0 above (for the write into the buffer), so we
   // save and restore it.
-  masm.mov(O0, L3);
+  __ mov(O0, L3);
   // Since the call will overwrite O7, we save and restore that, as well.
-  masm.mov(O7, L4);
-  masm.call_VM_leaf(L5, handle_zero, G2_thread);
-  masm.mov(L0, G1_scratch);
-  masm.mov(L1, G3_scratch);
-  masm.mov(L2, G4);
-  masm.mov(L3, O0);
-  masm.br(Assembler::always, /*annul*/false, Assembler::pt, restart);
-  masm.delayed()->mov(L4, O7);
+  __ mov(O7, L4);
+  __ call_VM_leaf(L5, handle_zero, G2_thread);
+  __ mov(L0, G1_scratch);
+  __ mov(L1, G3_scratch);
+  __ mov(L2, G4);
+  __ mov(L3, O0);
+  __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
+  __ delayed()->mov(L4, O7);
 
   if (with_frame) {
     satb_log_enqueue_with_frame = start;
-    satb_log_enqueue_with_frame_end = masm.pc();
+    satb_log_enqueue_with_frame_end = __ pc();
   } else {
     satb_log_enqueue_frameless = start;
-    satb_log_enqueue_frameless_end = masm.pc();
+    satb_log_enqueue_frameless_end = __ pc();
   }
+
+#undef __
 }
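
The assembly above implements the queue push described in the comments. As a minimal C++ sketch (field names are illustrative, mirroring the PtrQueue _index/_buf layout), the fast path is:

    #include <cstddef>

    struct SatbQueueSketch {
      size_t index;   // byte offset of the next free slot, counts down to 0
      void** buf;     // oop-sized slots
    };

    // Push pre_val; returns false when the buffer is full and the runtime
    // refill path (handle_zero_index_for_thread) must run instead.
    bool satb_enqueue_sketch(SatbQueueSketch* q, void* pre_val) {
      if (q->index == 0) return false;                 // index == 0? -> refill
      q->index -= sizeof(void*);                       // sub(L0, oopSize, L0)
      *(void**)((char*)q->buf + q->index) = pre_val;   // [_buf + index] := pre_val
      return true;
    }
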
 
 static inline void generate_satb_log_enqueue_if_necessary(bool with_frame) {
@@ -4424,9 +4449,8 @@
          tmp);
   }
 
-  // Check on whether to annul.
-  br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, tmp, filtered);
-  delayed() -> nop();
+  // Is marking active?
+  cmp_and_br_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
 
   // Do we need to load the previous value?
   if (obj != noreg) {
@@ -4448,9 +4472,7 @@
   assert(pre_val != noreg, "must have a real register");
 
   // Is the previous value null?
-  // Check on whether to annul.
-  br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, pre_val, filtered);
-  delayed() -> nop();
+  cmp_and_brx_short(pre_val, G0, Assembler::equal, Assembler::pt, filtered);
 
   // OK, it's not filtered, so we'll need to call enqueue.  In the normal
   // case, pre_val will be a scratch G-reg, but there are some cases in
@@ -4477,39 +4499,6 @@
   bind(filtered);
 }
 
-static jint num_ct_writes = 0;
-static jint num_ct_writes_filtered_in_hr = 0;
-static jint num_ct_writes_filtered_null = 0;
-static G1CollectedHeap* g1 = NULL;
-
-static Thread* count_ct_writes(void* filter_val, void* new_val) {
-  Atomic::inc(&num_ct_writes);
-  if (filter_val == NULL) {
-    Atomic::inc(&num_ct_writes_filtered_in_hr);
-  } else if (new_val == NULL) {
-    Atomic::inc(&num_ct_writes_filtered_null);
-  } else {
-    if (g1 == NULL) {
-      g1 = G1CollectedHeap::heap();
-    }
-  }
-  if ((num_ct_writes % 1000000) == 0) {
-    jint num_ct_writes_filtered =
-      num_ct_writes_filtered_in_hr +
-      num_ct_writes_filtered_null;
-
-    tty->print_cr("%d potential CT writes: %5.2f%% filtered\n"
-                  "   (%5.2f%% intra-HR, %5.2f%% null).",
-                  num_ct_writes,
-                  100.0*(float)num_ct_writes_filtered/(float)num_ct_writes,
-                  100.0*(float)num_ct_writes_filtered_in_hr/
-                  (float)num_ct_writes,
-                  100.0*(float)num_ct_writes_filtered_null/
-                  (float)num_ct_writes);
-  }
-  return Thread::current();
-}
-
 static address dirty_card_log_enqueue = 0;
 static u_char* dirty_card_log_enqueue_end = 0;
 
@@ -4518,79 +4507,88 @@
   BufferBlob* bb = BufferBlob::create("dirty_card_enqueue", EnqueueCodeSize*2);
   CodeBuffer buf(bb);
   MacroAssembler masm(&buf);
-  address start = masm.pc();
+#define __ masm.
+  address start = __ pc();
 
   Label not_already_dirty, restart, refill;
 
 #ifdef _LP64
-  masm.srlx(O0, CardTableModRefBS::card_shift, O0);
+  __ srlx(O0, CardTableModRefBS::card_shift, O0);
 #else
-  masm.srl(O0, CardTableModRefBS::card_shift, O0);
+  __ srl(O0, CardTableModRefBS::card_shift, O0);
 #endif
   AddressLiteral addrlit(byte_map_base);
-  masm.set(addrlit, O1); // O1 := <card table base>
-  masm.ldub(O0, O1, O2); // O2 := [O0 + O1]
-
-  masm.br_on_reg_cond(Assembler::rc_nz, /*annul*/false, Assembler::pt,
-                      O2, not_already_dirty);
-  // Get O1 + O2 into a reg by itself -- useful in the take-the-branch
-  // case, harmless if not.
-  masm.delayed()->add(O0, O1, O3);
+  __ set(addrlit, O1); // O1 := <card table base>
+  __ ldub(O0, O1, O2); // O2 := [O0 + O1]
+
+  assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
+  __ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
 
   // We didn't take the branch, so we're already dirty: return.
   // Use return-from-leaf
-  masm.retl();
-  masm.delayed()->nop();
+  __ retl();
+  __ delayed()->nop();
 
   // Not dirty.
-  masm.bind(not_already_dirty);
+  __ bind(not_already_dirty);
+
+  // Get O0 + O1 into a reg by itself
+  __ add(O0, O1, O3);
+
   // First, dirty it.
-  masm.stb(G0, O3, G0);  // [cardPtr] := 0  (i.e., dirty).
+  __ stb(G0, O3, G0);  // [cardPtr] := 0  (i.e., dirty).
+
   int dirty_card_q_index_byte_offset =
     in_bytes(JavaThread::dirty_card_queue_offset() +
              PtrQueue::byte_offset_of_index());
   int dirty_card_q_buf_byte_offset =
     in_bytes(JavaThread::dirty_card_queue_offset() +
              PtrQueue::byte_offset_of_buf());
-  masm.bind(restart);
-  masm.ld_ptr(G2_thread, dirty_card_q_index_byte_offset, L0);
-
-  masm.br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pn,
-                      L0, refill);
-  // If the branch is taken, no harm in executing this in the delay slot.
-  masm.delayed()->ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, L1);
-  masm.sub(L0, oopSize, L0);
-
-  masm.st_ptr(O3, L1, L0);  // [_buf + index] := I0
+  __ bind(restart);
+
+  // Load the index into the update buffer. PtrQueue::_index is
+  // a size_t so ld_ptr is appropriate here.
+  __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, L0);
+
+  // index == 0?
+  __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill);
+
+  __ ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, L1);
+  __ sub(L0, oopSize, L0);
+
+  __ st_ptr(O3, L1, L0);  // [_buf + index] := I0
   // Use return-from-leaf
-  masm.retl();
-  masm.delayed()->st_ptr(L0, G2_thread, dirty_card_q_index_byte_offset);
-
-  masm.bind(refill);
+  __ retl();
+  __ delayed()->st_ptr(L0, G2_thread, dirty_card_q_index_byte_offset);
+
+  __ bind(refill);
   address handle_zero =
     CAST_FROM_FN_PTR(address,
                      &DirtyCardQueueSet::handle_zero_index_for_thread);
   // This should be rare enough that we can afford to save all the
   // scratch registers that the calling context might be using.
-  masm.mov(G1_scratch, L3);
-  masm.mov(G3_scratch, L5);
+  __ mov(G1_scratch, L3);
+  __ mov(G3_scratch, L5);
   // We need the value of O3 above (for the write into the buffer), so we
   // save and restore it.
-  masm.mov(O3, L6);
+  __ mov(O3, L6);
   // Since the call will overwrite O7, we save and restore that, as well.
-  masm.mov(O7, L4);
-
-  masm.call_VM_leaf(L7_thread_cache, handle_zero, G2_thread);
-  masm.mov(L3, G1_scratch);
-  masm.mov(L5, G3_scratch);
-  masm.mov(L6, O3);
-  masm.br(Assembler::always, /*annul*/false, Assembler::pt, restart);
-  masm.delayed()->mov(L4, O7);
+  __ mov(O7, L4);
+
+  __ call_VM_leaf(L7_thread_cache, handle_zero, G2_thread);
+  __ mov(L3, G1_scratch);
+  __ mov(L5, G3_scratch);
+  __ mov(L6, O3);
+  __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
+  __ delayed()->mov(L4, O7);
 
   dirty_card_log_enqueue = start;
-  dirty_card_log_enqueue_end = masm.pc();
+  dirty_card_log_enqueue_end = __ pc();
   // XXX Should have a guarantee here about not going off the end!
   // Does it already do so?  Do an experiment...
+
+#undef __
+
 }
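
The generated stub above does the card lookup, the already-dirty filter, and the same pre-decrement queue push as the SATB case. A C++ sketch follows; the card shift value is an assumption for illustration (the code above only asserts that dirty_card_val() is 0).

    #include <cstddef>
    #include <cstdint>

    static const int     kCardShift = 9;   // assumed CardTableModRefBS::card_shift
    static const uint8_t kDirty     = 0;   // dirty_card_val(), asserted above to be 0

    struct DirtyCardQueueSketch {
      size_t index;     // byte offset, counts down; 0 means the buffer is full
      void** buf;       // buffer of card pointers
    };

    // Returns false when the queue is full and the refill slow path is needed.
    bool dirty_card_enqueue_sketch(uint8_t* byte_map_base, DirtyCardQueueSketch* q,
                                   uintptr_t store_addr) {
      uint8_t* card = byte_map_base + (store_addr >> kCardShift);  // O1 + (O0 >> shift)
      if (*card == kDirty) return true;     // already dirty: nothing to log
      *card = kDirty;                       // stb(G0, O3, G0): dirty it first
      if (q->index == 0) return false;      // index == 0? -> refill
      q->index -= sizeof(void*);
      *(uint8_t**)((char*)q->buf + q->index) = card;   // [_buf + index] := card ptr
      return true;
    }
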
 
 static inline void
@@ -4618,6 +4616,7 @@
   G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
   assert(bs->kind() == BarrierSet::G1SATBCT ||
          bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");
+
   if (G1RSBarrierRegionFilter) {
     xor3(store_addr, new_val, tmp);
 #ifdef _LP64
@@ -4626,33 +4625,8 @@
     srl(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
 #endif
 
-    if (G1PrintCTFilterStats) {
-      guarantee(tmp->is_global(), "Or stats won't work...");
-      // This is a sleazy hack: I'm temporarily hijacking G2, which I
-      // promise to restore.
-      mov(new_val, G2);
-      save_frame(0);
-      mov(tmp, O0);
-      mov(G2, O1);
-      // Save G-regs that target may use.
-      mov(G1, L1);
-      mov(G2, L2);
-      mov(G3, L3);
-      mov(G4, L4);
-      mov(G5, L5);
-      call(CAST_FROM_FN_PTR(address, &count_ct_writes));
-      delayed()->nop();
-      mov(O0, G2);
-      // Restore G-regs that target may have used.
-      mov(L1, G1);
-      mov(L3, G3);
-      mov(L4, G4);
-      mov(L5, G5);
-      restore(G0, G0, G0);
-    }
-    // XXX Should I predict this taken or not?  Does it mattern?
-    br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, tmp, filtered);
-    delayed()->nop();
+    // XXX Should I predict this taken or not?  Does it matter?
+    cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
   }
 
   // If the "store_addr" register is an "in" or "local" register, move it to
@@ -4677,7 +4651,6 @@
   restore();
 
   bind(filtered);
-
 }
 
 #endif  // SERIALGC
@@ -4903,7 +4876,7 @@
   delayed()->mov(G0, result);     // not equal
 
   // only one char ?
-  br_on_reg_cond(rc_z, true, Assembler::pn, limit, Ldone);
+  cmp_zero_and_br(zero, limit, Ldone, true, Assembler::pn);
   delayed()->add(G0, 1, result); // zero-length arrays are equal
 
   // word by word compare, don't need alignment check
@@ -4927,3 +4900,64 @@
   // Caller should set it:
   // add(G0, 1, result); // equals
 }
+
+// Use BIS for zeroing (count is in bytes).
+void MacroAssembler::bis_zeroing(Register to, Register count, Register temp, Label& Ldone) {
+  assert(UseBlockZeroing && VM_Version::has_block_zeroing(), "only works with BIS zeroing");
+  Register end = count;
+  int cache_line_size = VM_Version::prefetch_data_size();
+  // Minimum count when BIS zeroing can be used since
+  // it needs membar which is expensive.
+  int block_zero_size  = MAX2(cache_line_size*3, (int)BlockZeroingLowLimit);
+
+  Label small_loop;
+  // Check if count is negative (dead code) or zero.
+  // Note: count uses 64-bit values in a 64-bit VM.
+  cmp_and_brx_short(count, 0, Assembler::lessEqual, Assembler::pn, Ldone);
+
+  // Use BIS zeroing only for big arrays since it requires membar.
+  if (Assembler::is_simm13(block_zero_size)) { // < 4096
+    cmp(count, block_zero_size);
+  } else {
+    set(block_zero_size, temp);
+    cmp(count, temp);
+  }
+  br(Assembler::lessUnsigned, false, Assembler::pt, small_loop);
+  delayed()->add(to, count, end);
+
+  // Note: size is >= three (32 bytes) cache lines.
+
+  // Clean the beginning of space up to next cache line.
+  for (int offs = 0; offs < cache_line_size; offs += 8) {
+    stx(G0, to, offs);
+  }
+
+  // align to next cache line
+  add(to, cache_line_size, to);
+  and3(to, -cache_line_size, to);
+
+  // Note: size left >= two (32 bytes) cache lines.
+
+  // BIS should not be used to zero tail (64 bytes)
+  // to avoid zeroing a header of the following object.
+  sub(end, (cache_line_size*2)-8, end);
+
+  Label bis_loop;
+  bind(bis_loop);
+  stxa(G0, to, G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
+  add(to, cache_line_size, to);
+  cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, bis_loop);
+
+  // BIS needs membar.
+  membar(Assembler::StoreLoad);
+
+  add(end, (cache_line_size*2)-8, end); // restore end
+  cmp_and_brx_short(to, end, Assembler::greaterEqualUnsigned, Assembler::pn, Ldone);
+
+  // Clean the tail.
+  bind(small_loop);
+  stx(G0, to, 0);
+  add(to, 8, to);
+  cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, small_loop);
+  nop(); // Separate short branches
+}
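
To make the pointer arithmetic in bis_zeroing easier to follow, here is a portable C++ sketch of how the range is carved up. It assumes to and count are 8-byte aligned and count is at least three cache lines, as the code above guarantees before reaching the BIS loop; plain 8-byte stores stand in for the block-initializing stxa and the membar.

    #include <cstdint>
    #include <cstddef>

    // Zero [to, to+count) the way bis_zeroing carves it up (count in bytes).
    void bis_zeroing_sketch(char* to, size_t count, size_t cache_line_size) {
      char* end = to + count;
      // Head: plain 8-byte stores covering one cache line from the start.
      for (size_t off = 0; off < cache_line_size; off += 8)
        *(uint64_t*)(to + off) = 0;
      // Align 'to' up to the next cache-line boundary (the add + and3 above).
      to = (char*)(((uintptr_t)to + cache_line_size) & ~(uintptr_t)(cache_line_size - 1));
      // Body: whole lines, stopping 2*cache_line_size - 8 bytes before 'end' so the
      // block stores never touch the line holding the next object's header. The
      // real code issues one line-aligned stxa with ASI_ST_BLKINIT_PRIMARY per
      // iteration, which installs a zeroed line without reading memory first.
      char* bis_end = end - (2 * cache_line_size - 8);
      while (to < bis_end) {
        for (size_t off = 0; off < cache_line_size; off += 8)
          *(uint64_t*)(to + off) = 0;
        to += cache_line_size;
      }
      // The real code issues membar(StoreLoad) here; BIS stores are weakly ordered.
      // Tail: plain 8-byte stores for whatever is left.
      for (; to < end; to += 8)
        *(uint64_t*)to = 0;
    }
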
--- a/src/cpu/sparc/vm/assembler_sparc.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/sparc/vm/assembler_sparc.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -716,6 +716,8 @@
     casa_op3     = 0x3c,
     casxa_op3    = 0x3e,
 
+    mftoi_op3    = 0x36,
+
     alt_bit_op3  = 0x10,
      cc_bit_op3  = 0x10
   };
@@ -750,10 +752,16 @@
     fitod_opf   = 0xc8,
     fstod_opf   = 0xc9,
     fstoi_opf   = 0xd1,
-    fdtoi_opf   = 0xd2
+    fdtoi_opf   = 0xd2,
+
+    mdtox_opf   = 0x110,
+    mstouw_opf  = 0x111,
+    mstosw_opf  = 0x113,
+    mxtod_opf   = 0x118,
+    mwtos_opf   = 0x119
   };
 
-  enum RCondition {  rc_z = 1,  rc_lez = 2,  rc_lz = 3, rc_nz = 5, rc_gz = 6, rc_gez = 7  };
+  enum RCondition {  rc_z = 1,  rc_lez = 2,  rc_lz = 3, rc_nz = 5, rc_gz = 6, rc_gez = 7, rc_last = rc_gez  };
 
   enum Condition {
      // for FBfcc & FBPfcc instruction
@@ -858,9 +866,18 @@
     return is_simm(d, nbits + 2);
   }
 
+  address target_distance(Label& L) {
+    // Assembler::target(L) should be called only when
+    // a branch instruction is emitted since non-bound
+    // labels record current pc() as a branch address.
+    if (L.is_bound()) return target(L);
+    // Return current address for non-bound labels.
+    return pc();
+  }
+
   // test if label is in simm16 range in words (wdisp16).
   bool is_in_wdisp16_range(Label& L) {
-    return is_in_wdisp_range(target(L), pc(), 16);
+    return is_in_wdisp_range(target_distance(L), pc(), 16);
   }
   // test if the distance between two addresses fits in simm30 range in words
   static bool is_in_wdisp30_range(address a, address b) {
@@ -868,8 +885,13 @@
   }
 
   enum ASIs { // page 72, v9
-    ASI_PRIMARY        = 0x80,
-    ASI_PRIMARY_LITTLE = 0x88
+    ASI_PRIMARY            = 0x80,
+    ASI_PRIMARY_NOFAULT    = 0x82,
+    ASI_PRIMARY_LITTLE     = 0x88,
+    // Block initializing store
+    ASI_ST_BLKINIT_PRIMARY = 0xE2,
+    // Most-Recently-Used (MRU) BIS variant
+    ASI_ST_BLKINIT_MRU_PRIMARY = 0xF2
     // add more from book as needed
   };
 
@@ -967,6 +989,20 @@
   static int sx(       int         i)  { return  u_field(i,             12, 12); } // shift x=1 means 64-bit
   static int opf(      int         x)  { return  u_field(x,             13,  5); }
 
+  static bool is_cbcond( int x ) {
+    return (VM_Version::has_cbcond() && (inv_cond(x) > rc_last) &&
+            inv_op(x) == branch_op && inv_op2(x) == bpr_op2);
+  }
+  static bool is_cxb( int x ) {
+    assert(is_cbcond(x), "wrong instruction");
+    return (x & (1<<21)) != 0;
+  }
+  static int cond_cbcond( int         x)  { return  u_field((((x & 8)<<1) + 8 + (x & 7)), 29, 25); }
+  static int inv_cond_cbcond(int      x)  {
+    assert(is_cbcond(x), "wrong instruction");
+    return inv_u_field(x, 27, 25) | (inv_u_field(x, 29, 29)<<3);
+  }
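
The "+ 8" in cond_cbcond above sets the field bit that lands at instruction bit 28, so inv_cond() of a cbcond always exceeds rc_last; that is the property is_cbcond() checks. A stand-alone round trip of the packing, with the bit layout copied from the code above and the test harness purely illustrative:

    #include <cassert>

    static int pack_cond(int c)   { return ((c & 8) << 1) + 8 + (c & 7); }   // 5-bit field, bits 29..25
    static int unpack_cond(int f) { return (f & 7) | (((f >> 4) & 1) << 3); }

    int main() {
      for (int c = 0; c < 16; c++) {
        int f = pack_cond(c);
        assert(unpack_cond(f) == c);   // inverse recovers the 4-bit condition
        assert(f & 8);                 // bit 28 always set: distinguishes cbcond from bpr
      }
      return 0;
    }
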
+
   static int opf_cc(   CC          c, bool useFloat ) { return u_field((useFloat ? 0 : 4) + c, 13, 11); }
   static int mov_cc(   CC          c, bool useFloat ) { return u_field(useFloat ? 0 : 1,  18, 18) | u_field(c, 12, 11); }
 
@@ -1018,6 +1054,26 @@
     return r;
   }
 
+  // compute inverse of wdisp10
+  static intptr_t inv_wdisp10(int x, intptr_t pos) {
+    assert(is_cbcond(x), "wrong instruction");
+    int lo = inv_u_field(x, 12, 5);
+    int hi = (x >> 19) & 3;
+    if (hi >= 2) hi |= ~1;
+    return (((hi << 8) | lo) << 2) + pos;
+  }
+
+  // word offset for cbcond, 8 bits at [B12,B5], 2 bits at [B20,B19]
+  static int wdisp10(intptr_t x, intptr_t off) {
+    assert(VM_Version::has_cbcond(), "This CPU does not have CBCOND instruction");
+    intptr_t xx = x - off;
+    assert_signed_word_disp_range(xx, 10);
+    int r =  ( ( (xx >>  2   ) & ((1 << 8) - 1) ) <<  5 )
+           | ( ( (xx >> (2+8)) & 3              ) << 19 );
+    // Have to fake cbcond instruction to pass assert in inv_wdisp10()
+    assert(inv_wdisp10((r | op(branch_op) | cond_cbcond(rc_last+1) | op2(bpr_op2)), off) == x,  "inverse is not inverse");
+    return r;
+  }
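
The 10-bit word displacement is split across two instruction fields and sign-extended from the 2-bit high part on decode. A stand-alone round-trip check of that packing, with bit positions copied from wdisp10/inv_wdisp10 above and the harness itself illustrative:

    #include <cassert>
    #include <cstdint>

    static int pack_wdisp10(intptr_t target, intptr_t pc) {
      intptr_t xx = target - pc;                       // byte displacement
      return (int)((((xx >> 2) & 0xff) << 5)           // low 8 bits of word disp -> [12:5]
                 | (((xx >> 10) & 0x3) << 19));        // high 2 bits            -> [20:19]
    }

    static intptr_t unpack_wdisp10(int insn, intptr_t pc) {
      int lo = (insn >> 5) & 0xff;
      int hi = (insn >> 19) & 0x3;
      if (hi >= 2) hi |= ~1;                           // sign-extend the 2-bit high field
      return (((hi << 8) | lo) << 2) + pc;
    }

    int main() {
      intptr_t pc = 0x1000;
      for (intptr_t disp = -2048; disp <= 2044; disp += 4)   // simm10 words = +/-2KB in bytes
        assert(unpack_wdisp10(pack_wdisp10(pc + disp, pc), pc) == pc + disp);
      return 0;
    }
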
 
   // word displacement in low-order nbits bits
 
@@ -1061,6 +1117,9 @@
     return x & ((1 << 10) - 1);
   }
 
+  // instruction only in VIS3
+  static void vis3_only() { assert( VM_Version::has_vis3(), "This instruction only works on SPARC with VIS3"); }
+
   // instruction only in v9
   static void v9_only() { assert( VM_Version::v9_instructions_work(), "This instruction only works on SPARC V9"); }
 
@@ -1127,7 +1186,26 @@
 #endif
   }
 
+  // cbcond instructions should not be generated one after another
+  bool cbcond_before() {
+    if (offset() == 0) return false; // it is first instruction
+    int x = *(int*)(intptr_t(pc()) - 4); // previous instruction
+    return is_cbcond(x);
+  }
+
+  void no_cbcond_before() {
+    assert(offset() == 0 || !cbcond_before(), "cbcond should not follow another cbcond");
+  }
+
 public:
+
+  bool use_cbcond(Label& L) {
+    if (!UseCBCond || cbcond_before()) return false;
+    intptr_t x = intptr_t(target_distance(L)) - intptr_t(pc());
+    assert( (x & 3) == 0, "not word aligned");
+    return is_simm(x, 12);
+  }
+
   // Tells assembler you know that next instruction is delayed
   Assembler* delayed() {
 #ifdef CHECK_DELAY
@@ -1170,10 +1248,15 @@
   void addccc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(addc_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
   void addccc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(addc_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
 
+
   // pp 136
 
-  inline void bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt = relocInfo::none );
-  inline void bpr( RCondition c, bool a, Predict p, Register s1, Label& L);
+  inline void bpr(RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt = relocInfo::none);
+  inline void bpr(RCondition c, bool a, Predict p, Register s1, Label& L);
+
+  // compare and branch
+  inline void cbcond(Condition c, CC cc, Register s1, Register s2, Label& L);
+  inline void cbcond(Condition c, CC cc, Register s1, int simm5, Label& L);
 
  protected: // use MacroAssembler::br instead
 
@@ -1187,8 +1270,6 @@
   inline void fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
   inline void fbp( Condition c, bool a, CC cc, Predict p, Label& L );
 
- public:
-
   // pp 144
 
   inline void br( Condition c, bool a, address d, relocInfo::relocType rt = relocInfo::none );
@@ -1209,6 +1290,8 @@
   inline void call( address d,  relocInfo::relocType rt = relocInfo::runtime_call_type );
   inline void call( Label& L,   relocInfo::relocType rt = relocInfo::runtime_call_type );
 
+ public:
+
   // pp 150
 
   // These instructions compare the contents of s2 with the contents of
@@ -1247,8 +1330,8 @@
 
   // pp 159
 
-  void ftox( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v9_only();  emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x80 + w) | fs2(s, w)); }
-  void ftoi( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) {             emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0xd0 + w) | fs2(s, w)); }
+  void ftox( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v9_only();  emit_long( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(fpop1_op3) | opf(0x80 + w) | fs2(s, w)); }
+  void ftoi( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) {             emit_long( op(arith_op) | fd(d, FloatRegisterImpl::S) | op3(fpop1_op3) | opf(0xd0 + w) | fs2(s, w)); }
 
   // pp 160
 
@@ -1256,8 +1339,8 @@
 
   // pp 161
 
-  void fxtof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v9_only();  emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x80 + w*4) | fs2(s, w)); }
-  void fitof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) {             emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0xc0 + w*4) | fs2(s, w)); }
+  void fxtof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v9_only();  emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x80 + w*4) | fs2(s, FloatRegisterImpl::D)); }
+  void fitof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) {             emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0xc0 + w*4) | fs2(s, FloatRegisterImpl::S)); }
 
   // pp 162
 
@@ -1704,11 +1787,27 @@
                                                                            rs1(s) |
                                                                            op3(wrreg_op3) |
                                                                            u_field(2, 29, 25) |
-                                                                           u_field(1, 13, 13) |
+                                                                           immed(true) |
                                                                            simm(simm13a, 13)); }
-  inline void wrasi(  Register d) { v9_only(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(3, 29, 25)); }
+  inline void wrasi(Register d) { v9_only(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(3, 29, 25)); }
+  // wrasi(d, imm) stores (d xor imm) to asi
+  inline void wrasi(Register d, int simm13a) { v9_only(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) |
+                                               u_field(3, 29, 25) | immed(true) | simm(simm13a, 13)); }
   inline void wrfprs( Register d) { v9_only(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(6, 29, 25)); }
 
+
+  // VIS3 instructions
+
+  void movstosw( FloatRegister s, Register d ) { vis3_only();  emit_long( op(arith_op) | rd(d) | op3(mftoi_op3) | opf(mstosw_opf) | fs2(s, FloatRegisterImpl::S)); }
+  void movstouw( FloatRegister s, Register d ) { vis3_only();  emit_long( op(arith_op) | rd(d) | op3(mftoi_op3) | opf(mstouw_opf) | fs2(s, FloatRegisterImpl::S)); }
+  void movdtox(  FloatRegister s, Register d ) { vis3_only();  emit_long( op(arith_op) | rd(d) | op3(mftoi_op3) | opf(mdtox_opf) | fs2(s, FloatRegisterImpl::D)); }
+
+  void movwtos( Register s, FloatRegister d ) { vis3_only();  emit_long( op(arith_op) | fd(d, FloatRegisterImpl::S) | op3(mftoi_op3) | opf(mwtos_opf) | rs2(s)); }
+  void movxtod( Register s, FloatRegister d ) { vis3_only();  emit_long( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(mftoi_op3) | opf(mxtod_opf) | rs2(s)); }
+
+
+
+
   // For a given register condition, return the appropriate condition code
   // Condition (the one you would use to get the same effect after "tst" on
   // the target register.)
@@ -1838,18 +1937,32 @@
   inline void fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
   inline void fb( Condition c, bool a, Predict p, Label& L );
 
-  // compares register with zero and branches (V9 and V8 instructions)
-  void br_zero( Condition c, bool a, Predict p, Register s1, Label& L);
+  // Compares a register with zero (32-bit) and branches (V9 and V8 instructions)
+  void cmp_zero_and_br( Condition c, Register s1, Label& L, bool a = false, Predict p = pn );
   // Compares a pointer register with zero and branches on (not)null.
   // Does a test & branch on 32-bit systems and a register-branch on 64-bit.
   void br_null   ( Register s1, bool a, Predict p, Label& L );
   void br_notnull( Register s1, bool a, Predict p, Label& L );
 
-  // These versions will do the most efficient thing on v8 and v9.  Perhaps
-  // this is what the routine above was meant to do, but it didn't (and
-  // didn't cover both target address kinds.)
-  void br_on_reg_cond( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt = relocInfo::none );
-  void br_on_reg_cond( RCondition c, bool a, Predict p, Register s1, Label& L);
+  //
+  // Compare registers and branch with nop in delay slot or cbcond without delay slot.
+  //
+  // ATTENTION: use these instructions with caution because the cbcond instruction
+  //            has a very short range: 512 instructions (2Kbyte).
+
+  // Compare integer (32 bit) values (icc only).
+  void cmp_and_br_short(Register s1, Register s2, Condition c, Predict p, Label& L);
+  void cmp_and_br_short(Register s1, int simm13a, Condition c, Predict p, Label& L);
+  // Platform-dependent version for pointer compare (icc on !LP64 and xcc on LP64).
+  void cmp_and_brx_short(Register s1, Register s2, Condition c, Predict p, Label& L);
+  void cmp_and_brx_short(Register s1, int simm13a, Condition c, Predict p, Label& L);
+
+  // Short branch versions for comparing a pointer with zero.
+  void br_null_short   ( Register s1, Predict p, Label& L );
+  void br_notnull_short( Register s1, Predict p, Label& L );
+
+  // unconditional short branch
+  void ba_short(Label& L);
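
As a usage note, the conversions throughout this changeset replace the three-instruction idiom with one call to these helpers. Inside a MacroAssembler method the change looks like the fragment below; the register and label names are placeholders.

      // before: compare, conditional branch, wasted delay slot
      cmp(Rval, G0);
      br(Assembler::equal, false, Assembler::pt, Ldone);
      delayed()->nop();

      // after: one helper; emits a cbcond when UseCBCond is set and Ldone is
      // within reach, otherwise falls back to the cmp/branch/nop sequence
      cmp_and_br_short(Rval, G0, Assembler::equal, Assembler::pt, Ldone);
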
 
   inline void bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
   inline void bp( Condition c, bool a, CC cc, Predict p, Label& L );
@@ -1858,8 +1971,8 @@
   inline void brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
   inline void brx( Condition c, bool a, Predict p, Label& L );
 
-  // unconditional short branch
-  inline void ba( bool a, Label& L );
+  // unconditional branch
+  inline void ba( Label& L );
 
   // Branch that tests fp condition codes
   inline void fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
@@ -2143,7 +2256,6 @@
 
   inline void stbool(Register d, const Address& a) { stb(d, a); }
   inline void ldbool(const Address& a, Register d) { ldsb(a, d); }
-  inline void tstbool( Register s ) { tst(s); }
   inline void movbool( bool boolconst, Register d) { mov( (int) boolconst, d); }
 
   // klass oop manipulations if compressed
@@ -2445,8 +2557,7 @@
                                      Label* L_success,
                                      Label* L_failure,
                                      Label* L_slow_path,
-                RegisterOrConstant super_check_offset = RegisterOrConstant(-1),
-                Register instanceof_hack = noreg);
+                RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
 
   // The rest of the type check; must be wired to a corresponding fast path.
   // It does not repeat the fast path logic, so don't use it standalone.
@@ -2518,6 +2629,8 @@
   void char_arrays_equals(Register ary1, Register ary2,
                           Register limit, Register result,
                           Register chr1, Register chr2, Label& Ldone);
+  // Use BIS for zeroing
+  void bis_zeroing(Register to, Register count, Register temp, Label& Ldone);
 
 #undef VIRTUAL
 
--- a/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -80,32 +80,36 @@
 inline void Assembler::add(Register s1, int simm13a, Register d, relocInfo::relocType rtype ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rtype ); }
 inline void Assembler::add(Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec ); }
 
-inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt ) { v9_only();  emit_data( op(branch_op) | annul(a) | cond(c) | op2(bpr_op2) | wdisp16(intptr_t(d), intptr_t(pc())) | predict(p) | rs1(s1), rt);  has_delay_slot(); }
+inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt ) { v9_only();  cti();  emit_data( op(branch_op) | annul(a) | cond(c) | op2(bpr_op2) | wdisp16(intptr_t(d), intptr_t(pc())) | predict(p) | rs1(s1), rt);  has_delay_slot(); }
 inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, Label& L) { bpr( c, a, p, s1, target(L)); }
 
-inline void Assembler::fb( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep();  emit_data( op(branch_op) | annul(a) | cond(c) | op2(fb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt);  has_delay_slot(); }
+inline void Assembler::fb( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep();  cti();  emit_data( op(branch_op) | annul(a) | cond(c) | op2(fb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt);  has_delay_slot(); }
 inline void Assembler::fb( Condition c, bool a, Label& L ) { fb(c, a, target(L)); }
 
-inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only();  emit_data( op(branch_op) | annul(a) | cond(c) | op2(fbp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt);  has_delay_slot(); }
+inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only();  cti();  emit_data( op(branch_op) | annul(a) | cond(c) | op2(fbp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt);  has_delay_slot(); }
 inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) { fbp(c, a, cc, p, target(L)); }
 
-inline void Assembler::cb( Condition c, bool a, address d, relocInfo::relocType rt ) { v8_only();  emit_data( op(branch_op) | annul(a) | cond(c) | op2(cb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt);  has_delay_slot(); }
+inline void Assembler::cb( Condition c, bool a, address d, relocInfo::relocType rt ) { v8_only();  cti();  emit_data( op(branch_op) | annul(a) | cond(c) | op2(cb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt);  has_delay_slot(); }
 inline void Assembler::cb( Condition c, bool a, Label& L ) { cb(c, a, target(L)); }
 
-inline void Assembler::br( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep();   emit_data( op(branch_op) | annul(a) | cond(c) | op2(br_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt);  has_delay_slot(); }
+inline void Assembler::br( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep();  cti();   emit_data( op(branch_op) | annul(a) | cond(c) | op2(br_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt);  has_delay_slot(); }
 inline void Assembler::br( Condition c, bool a, Label& L ) { br(c, a, target(L)); }
 
-inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only();  emit_data( op(branch_op) | annul(a) | cond(c) | op2(bp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt);  has_delay_slot(); }
+inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only();  cti();  emit_data( op(branch_op) | annul(a) | cond(c) | op2(bp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt);  has_delay_slot(); }
 inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) { bp(c, a, cc, p, target(L)); }
 
-inline void Assembler::call( address d,  relocInfo::relocType rt ) { emit_data( op(call_op) | wdisp(intptr_t(d), intptr_t(pc()), 30), rt);  has_delay_slot(); assert(rt != relocInfo::virtual_call_type, "must use virtual_call_Relocation::spec"); }
+// compare and branch
+inline void Assembler::cbcond(Condition c, CC cc, Register s1, Register s2, Label& L) { cti();  no_cbcond_before();  emit_data(op(branch_op) | cond_cbcond(c) | op2(bpr_op2) | branchcc(cc) | wdisp10(intptr_t(target(L)), intptr_t(pc())) | rs1(s1) | rs2(s2)); }
+inline void Assembler::cbcond(Condition c, CC cc, Register s1, int simm5, Label& L)   { cti();  no_cbcond_before();  emit_data(op(branch_op) | cond_cbcond(c) | op2(bpr_op2) | branchcc(cc) | wdisp10(intptr_t(target(L)), intptr_t(pc())) | rs1(s1) | immed(true) | simm(simm5, 5)); }
+
+inline void Assembler::call( address d,  relocInfo::relocType rt ) { cti();  emit_data( op(call_op) | wdisp(intptr_t(d), intptr_t(pc()), 30), rt);  has_delay_slot(); assert(rt != relocInfo::virtual_call_type, "must use virtual_call_Relocation::spec"); }
 inline void Assembler::call( Label& L,   relocInfo::relocType rt ) { call( target(L), rt); }
 
 inline void Assembler::flush( Register s1, Register s2) { emit_long( op(arith_op) | op3(flush_op3) | rs1(s1) | rs2(s2)); }
 inline void Assembler::flush( Register s1, int simm13a) { emit_data( op(arith_op) | op3(flush_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 
-inline void Assembler::jmpl( Register s1, Register s2, Register d                          ) { emit_long( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | rs2(s2));  has_delay_slot(); }
-inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { emit_data( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec);  has_delay_slot(); }
+inline void Assembler::jmpl( Register s1, Register s2, Register d ) { cti();  emit_long( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | rs2(s2));  has_delay_slot(); }
+inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { cti();  emit_data( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec);  has_delay_slot(); }
 
 inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d) {
   if (s2.is_register()) ldf(w, s1, s2.as_register(), d);
@@ -240,8 +244,8 @@
 inline void Assembler::prefetch(const Address& a, PrefetchFcn f, int offset) { v9_only(); relocate(a.rspec(offset)); prefetch(a.base(), a.disp() + offset, f); }
 
 
-inline void Assembler::rett( Register s1, Register s2                         ) { emit_long( op(arith_op) | op3(rett_op3) | rs1(s1) | rs2(s2));  has_delay_slot(); }
-inline void Assembler::rett( Register s1, int simm13a, relocInfo::relocType rt) { emit_data( op(arith_op) | op3(rett_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rt);  has_delay_slot(); }
+inline void Assembler::rett( Register s1, Register s2                         ) { cti();  emit_long( op(arith_op) | op3(rett_op3) | rs1(s1) | rs2(s2));  has_delay_slot(); }
+inline void Assembler::rett( Register s1, int simm13a, relocInfo::relocType rt) { cti();  emit_data( op(arith_op) | op3(rett_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rt);  has_delay_slot(); }
 
 inline void Assembler::sethi( int imm22a, Register d, RelocationHolder const& rspec ) { emit_data( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(imm22a), rspec); }
 
@@ -557,8 +561,8 @@
   brx(c, a, p, target(L));
 }
 
-inline void MacroAssembler::ba( bool a, Label& L ) {
-  br(always, a, pt, L);
+inline void MacroAssembler::ba( Label& L ) {
+  br(always, false, pt, L);
 }
 
 // Warning: V9 only functions
--- a/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -303,9 +303,7 @@
     assert(_oop_index >= 0, "must have oop index");
     __ load_heap_oop(_obj, java_lang_Class::klass_offset_in_bytes(), G3);
     __ ld_ptr(G3, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc), G3);
-    __ cmp(G2_thread, G3);
-    __ br(Assembler::notEqual, false, Assembler::pn, call_patch);
-    __ delayed()->nop();
+    __ cmp_and_brx_short(G2_thread, G3, Assembler::notEqual, Assembler::pn, call_patch);
 
     // load_klass patches may execute the patched code before it's
     // copied back into place so we need to jump back into the main
@@ -423,8 +421,7 @@
   }
 
   if (__ is_in_wdisp16_range(_continuation)) {
-    __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
-                      pre_val_reg, _continuation);
+    __ br_null(pre_val_reg, /*annul*/false, Assembler::pt, _continuation);
   } else {
     __ cmp(pre_val_reg, G0);
     __ brx(Assembler::equal, false, Assembler::pn, _continuation);
@@ -460,8 +457,7 @@
     // The original src operand was not a constant.
     // Generate src == null?
     if (__ is_in_wdisp16_range(_continuation)) {
-      __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
-                        src_reg, _continuation);
+      __ br_null(src_reg, /*annul*/false, Assembler::pt, _continuation);
     } else {
       __ cmp(src_reg, G0);
       __ brx(Assembler::equal, false, Assembler::pt, _continuation);
@@ -478,13 +474,9 @@
   Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset_in_bytes() + sizeof(oopDesc));
   __ ld(ref_type_adr, tmp_reg);
 
-  if (__ is_in_wdisp16_range(_continuation)) {
-    __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
-                      tmp_reg, _continuation);
-  } else {
-    __ cmp(tmp_reg, G0);
-    __ brx(Assembler::equal, false, Assembler::pt, _continuation);
-  }
+  // _reference_type field is of type ReferenceType (enum)
+  assert(REF_NONE == 0, "check this code");
+  __ cmp_zero_and_br(Assembler::equal, tmp_reg, _continuation, /*annul*/false, Assembler::pt);
   __ delayed()->nop();
 
   // Is marking active?
@@ -500,13 +492,8 @@
     assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
     __ ldsb(in_progress, tmp_reg);
   }
-  if (__ is_in_wdisp16_range(_continuation)) {
-    __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
-                      tmp_reg, _continuation);
-  } else {
-    __ cmp(tmp_reg, G0);
-    __ brx(Assembler::equal, false, Assembler::pt, _continuation);
-  }
+
+  __ cmp_zero_and_br(Assembler::equal, tmp_reg, _continuation, /*annul*/false, Assembler::pt);
   __ delayed()->nop();
 
   // val == null?
@@ -514,8 +501,7 @@
   Register val_reg = val()->as_register();
 
   if (__ is_in_wdisp16_range(_continuation)) {
-    __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
-                      val_reg, _continuation);
+    __ br_null(val_reg, /*annul*/false, Assembler::pt, _continuation);
   } else {
     __ cmp(val_reg, G0);
     __ brx(Assembler::equal, false, Assembler::pt, _continuation);
@@ -544,9 +530,9 @@
   assert(new_val()->is_register(), "Precondition.");
   Register addr_reg = addr()->as_pointer_register();
   Register new_val_reg = new_val()->as_register();
+
   if (__ is_in_wdisp16_range(_continuation)) {
-    __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
-                      new_val_reg, _continuation);
+    __ br_null(new_val_reg, /*annul*/false, Assembler::pt, _continuation);
   } else {
     __ cmp(new_val_reg, G0);
     __ brx(Assembler::equal, false, Assembler::pn, _continuation);
--- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -142,11 +142,6 @@
 }
 
 
-LIR_Opr LIR_Assembler::incomingReceiverOpr() {
-  return FrameMap::I0_oop_opr;
-}
-
-
 LIR_Opr LIR_Assembler::osrBufferPointer() {
   return FrameMap::I0_opr;
 }
@@ -217,9 +212,7 @@
       {
         Label L;
         __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
-        __ cmp(G0, O7);
-        __ br(Assembler::notEqual, false, Assembler::pt, L);
-        __ delayed()->nop();
+        __ cmp_and_br_short(O7, G0, Assembler::notEqual, Assembler::pt, L);
         __ stop("locked object is NULL");
         __ bind(L);
       }
@@ -2096,10 +2089,10 @@
       __ xor3(O0, -1, tmp);
       __ sub(length, tmp, length);
       __ add(src_pos, tmp, src_pos);
-      __ br_zero(Assembler::less, false, Assembler::pn, O0, *stub->entry());
+      __ cmp_zero_and_br(Assembler::less, O0, *stub->entry());
       __ delayed()->add(dst_pos, tmp, dst_pos);
     } else {
-      __ br_zero(Assembler::less, false, Assembler::pn, O0, *stub->entry());
+      __ cmp_zero_and_br(Assembler::less, O0, *stub->entry());
       __ delayed()->nop();
     }
     __ bind(*stub->continuation());
@@ -2123,22 +2116,19 @@
 
   if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
     // test src_pos register
-    __ tst(src_pos);
-    __ br(Assembler::less, false, Assembler::pn, *stub->entry());
+    __ cmp_zero_and_br(Assembler::less, src_pos, *stub->entry());
     __ delayed()->nop();
   }
 
   if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
     // test dst_pos register
-    __ tst(dst_pos);
-    __ br(Assembler::less, false, Assembler::pn, *stub->entry());
+    __ cmp_zero_and_br(Assembler::less, dst_pos, *stub->entry());
     __ delayed()->nop();
   }
 
   if (flags & LIR_OpArrayCopy::length_positive_check) {
     // make sure length isn't negative
-    __ tst(length);
-    __ br(Assembler::less, false, Assembler::pn, *stub->entry());
+    __ cmp_zero_and_br(Assembler::less, length, *stub->entry());
     __ delayed()->nop();
   }
 
@@ -2261,8 +2251,7 @@
 #ifndef PRODUCT
         if (PrintC1Statistics) {
           Label failed;
-          __ br_notnull(O0, false, Assembler::pn,  failed);
-          __ delayed()->nop();
+          __ br_notnull_short(O0, Assembler::pn, failed);
           __ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, G1, G3);
           __ bind(failed);
         }
@@ -2314,9 +2303,7 @@
         __ br(Assembler::notEqual, false, Assembler::pn, halt);
         // load the raw value of the src klass.
         __ delayed()->lduw(src, oopDesc::klass_offset_in_bytes(), tmp2);
-        __ cmp(tmp, tmp2);
-        __ br(Assembler::equal, false, Assembler::pn, known_ok);
-        __ delayed()->nop();
+        __ cmp_and_br_short(tmp, tmp2, Assembler::equal, Assembler::pn, known_ok);
       } else {
         __ cmp(tmp, tmp2);
         __ br(Assembler::equal, false, Assembler::pn, known_ok);
@@ -2330,9 +2317,7 @@
         __ cmp(tmp, tmp2);
         __ brx(Assembler::notEqual, false, Assembler::pn, halt);
         __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2);
-        __ cmp(tmp, tmp2);
-        __ brx(Assembler::equal, false, Assembler::pn, known_ok);
-        __ delayed()->nop();
+        __ cmp_and_brx_short(tmp, tmp2, Assembler::equal, Assembler::pn, known_ok);
       } else {
         __ cmp(tmp, tmp2);
         __ brx(Assembler::equal, false, Assembler::pn, known_ok);
@@ -2530,15 +2515,13 @@
                           mdo_offset_bias);
     __ ld_ptr(receiver_addr, tmp1);
     __ verify_oop(tmp1);
-    __ cmp(recv, tmp1);
-    __ brx(Assembler::notEqual, false, Assembler::pt, next_test);
-    __ delayed()->nop();
+    __ cmp_and_brx_short(recv, tmp1, Assembler::notEqual, Assembler::pt, next_test);
     Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
                       mdo_offset_bias);
     __ ld_ptr(data_addr, tmp1);
     __ add(tmp1, DataLayout::counter_increment, tmp1);
     __ st_ptr(tmp1, data_addr);
-    __ ba(false, *update_done);
+    __ ba(*update_done);
     __ delayed()->nop();
     __ bind(next_test);
   }
@@ -2549,13 +2532,12 @@
     Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                       mdo_offset_bias);
     __ ld_ptr(recv_addr, tmp1);
-    __ br_notnull(tmp1, false, Assembler::pt, next_test);
-    __ delayed()->nop();
+    __ br_notnull_short(tmp1, Assembler::pt, next_test);
     __ st_ptr(recv, recv_addr);
     __ set(DataLayout::counter_increment, tmp1);
     __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
               mdo_offset_bias);
-    __ ba(false, *update_done);
+    __ ba(*update_done);
     __ delayed()->nop();
     __ bind(next_test);
   }
@@ -2601,8 +2583,7 @@
     setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
 
     Label not_null;
-    __ br_notnull(obj, false, Assembler::pn, not_null);
-    __ delayed()->nop();
+    __ br_notnull_short(obj, Assembler::pn, not_null);
     Register mdo      = k_RInfo;
     Register data_val = Rtmp1;
     jobject2reg(md->constant_encoding(), mdo);
@@ -2614,7 +2595,7 @@
     __ ldub(flags_addr, data_val);
     __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
     __ stb(data_val, flags_addr);
-    __ ba(false, *obj_is_null);
+    __ ba(*obj_is_null);
     __ delayed()->nop();
     __ bind(not_null);
   } else {
@@ -2682,7 +2663,7 @@
     __ load_klass(obj, recv);
     type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
     // Jump over the failure case
-    __ ba(false, *success);
+    __ ba(*success);
     __ delayed()->nop();
     // Cast failure case
     __ bind(profile_cast_failure);
@@ -2695,10 +2676,10 @@
     __ ld_ptr(data_addr, tmp1);
     __ sub(tmp1, DataLayout::counter_increment, tmp1);
     __ st_ptr(tmp1, data_addr);
-    __ ba(false, *failure);
+    __ ba(*failure);
     __ delayed()->nop();
   }
-  __ ba(false, *success);
+  __ ba(*success);
   __ delayed()->nop();
 }
 
@@ -2728,8 +2709,7 @@
 
     if (op->should_profile()) {
       Label not_null;
-      __ br_notnull(value, false, Assembler::pn, not_null);
-      __ delayed()->nop();
+      __ br_notnull_short(value, Assembler::pn, not_null);
       Register mdo      = k_RInfo;
       Register data_val = Rtmp1;
       jobject2reg(md->constant_encoding(), mdo);
@@ -2741,12 +2721,10 @@
       __ ldub(flags_addr, data_val);
       __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
       __ stb(data_val, flags_addr);
-      __ ba(false, done);
-      __ delayed()->nop();
+      __ ba_short(done);
       __ bind(not_null);
     } else {
-      __ br_null(value, false, Assembler::pn, done);
-      __ delayed()->nop();
+      __ br_null_short(value, Assembler::pn, done);
     }
     add_debug_info_for_null_check_here(op->info_for_exception());
     __ load_klass(array, k_RInfo);
@@ -2777,8 +2755,7 @@
       }
       __ load_klass(value, recv);
       type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
-      __ ba(false, done);
-      __ delayed()->nop();
+      __ ba_short(done);
       // Cast failure case
       __ bind(profile_cast_failure);
       jobject2reg(md->constant_encoding(), mdo);
@@ -2790,7 +2767,7 @@
       __ ld_ptr(data_addr, tmp1);
       __ sub(tmp1, DataLayout::counter_increment, tmp1);
       __ st_ptr(tmp1, data_addr);
-      __ ba(false, *stub->entry());
+      __ ba(*stub->entry());
       __ delayed()->nop();
     }
     __ bind(done);
@@ -2808,8 +2785,7 @@
     emit_typecheck_helper(op, &success, &failure, &failure);
     __ bind(failure);
     __ set(0, dst);
-    __ ba(false, done);
-    __ delayed()->nop();
+    __ ba_short(done);
     __ bind(success);
     __ set(1, dst);
     __ bind(done);
--- a/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -41,9 +41,7 @@
   // Note: needs more testing of out-of-line vs. inline slow case
   verify_oop(receiver);
   load_klass(receiver, temp_reg);
-  cmp(temp_reg, iCache);
-  brx(Assembler::equal, true, Assembler::pt, L);
-  delayed()->nop();
+  cmp_and_brx_short(temp_reg, iCache, Assembler::equal, Assembler::pt, L);
   AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
   jump_to(ic_miss, temp_reg);
   delayed()->nop();
@@ -142,8 +140,7 @@
   }
  // Test first if it is a fast recursive unlock
   ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark);
-  br_null(Rmark, false, Assembler::pt, done);
-  delayed()->nop();
+  br_null_short(Rmark, Assembler::pt, done);
   if (!UseBiasedLocking) {
     // load object
     ld_ptr(Rbox, BasicObjectLock::obj_offset_in_bytes(), Roop);
@@ -231,7 +228,7 @@
   if (!is_simm13(obj_size * wordSize)) {
     // would need to use extra register to load
     // object size => go the slow case for now
-    br(Assembler::always, false, Assembler::pt, slow_case);
+    ba(slow_case);
     delayed()->nop();
     return;
   }
@@ -257,12 +254,10 @@
     Label ok;
     ld(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), t1);
     if (var_size_in_bytes != noreg) {
-      cmp(t1, var_size_in_bytes);
+      cmp_and_brx_short(t1, var_size_in_bytes, Assembler::equal, Assembler::pt, ok);
     } else {
-      cmp(t1, con_size_in_bytes);
+      cmp_and_brx_short(t1, con_size_in_bytes, Assembler::equal, Assembler::pt, ok);
     }
-    brx(Assembler::equal, false, Assembler::pt, ok);
-    delayed()->nop();
     stop("bad size in initialize_object");
     should_not_reach_here();
 
@@ -387,8 +382,7 @@
 
 void C1_MacroAssembler::verify_not_null_oop(Register r) {
   Label not_null;
-  br_notnull(r, false, Assembler::pt, not_null);
-  delayed()->nop();
+  br_notnull_short(r, Assembler::pt, not_null);
   stop("non-null oop required");
   bind(not_null);
   if (!VerifyOops) return;
--- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -71,8 +71,7 @@
   { Label L;
     Address exception_addr(G2_thread, Thread::pending_exception_offset());
     ld_ptr(exception_addr, Gtemp);
-    br_null(Gtemp, false, pt, L);
-    delayed()->nop();
+    br_null_short(Gtemp, pt, L);
     Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
     st_ptr(G0, vm_result_addr);
     Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
@@ -333,9 +332,7 @@
   assert(deopt_blob != NULL, "deoptimization blob must have been created");
 
   Label no_deopt;
-  __ tst(O0);
-  __ brx(Assembler::equal, false, Assembler::pt, no_deopt);
-  __ delayed()->nop();
+  __ br_null_short(O0, Assembler::pt, no_deopt);
 
  // return to the deoptimization handler entry for unpacking and reexecute
  // if we simply returned then we'd deopt as if any call we patched had just
@@ -402,18 +399,15 @@
           if (id == fast_new_instance_init_check_id) {
             // make sure the klass is initialized
             __ ld(G5_klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), G3_t1);
-            __ cmp(G3_t1, instanceKlass::fully_initialized);
-            __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
-            __ delayed()->nop();
+            __ cmp_and_br_short(G3_t1, instanceKlass::fully_initialized, Assembler::notEqual, Assembler::pn, slow_path);
           }
 #ifdef ASSERT
           // assert object can be fast path allocated
           {
             Label ok, not_ok;
           __ ld(G5_klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), G1_obj_size);
-          __ cmp(G1_obj_size, 0);  // make sure it's an instance (LH > 0)
-          __ br(Assembler::lessEqual, false, Assembler::pn, not_ok);
-          __ delayed()->nop();
+          // make sure it's an instance (LH > 0)
+          __ cmp_and_br_short(G1_obj_size, 0, Assembler::lessEqual, Assembler::pn, not_ok);
           __ btst(Klass::_lh_instance_slow_path_bit, G1_obj_size);
           __ br(Assembler::zero, false, Assembler::pn, ok);
           __ delayed()->nop();
@@ -501,9 +495,7 @@
           int tag = ((id == new_type_array_id)
                      ? Klass::_lh_array_tag_type_value
                      : Klass::_lh_array_tag_obj_value);
-          __ cmp(G3_t1, tag);
-          __ brx(Assembler::equal, false, Assembler::pt, ok);
-          __ delayed()->nop();
+          __ cmp_and_brx_short(G3_t1, tag, Assembler::equal, Assembler::pt, ok);
           __ stop("assert(is an array klass)");
           __ should_not_reach_here();
           __ bind(ok);
@@ -519,9 +511,7 @@
 
           // check that array length is small enough for fast path
           __ set(C1_MacroAssembler::max_array_allocation_length, G3_t1);
-          __ cmp(G4_length, G3_t1);
-          __ br(Assembler::greaterUnsigned, false, Assembler::pn, slow_path);
-          __ delayed()->nop();
+          __ cmp_and_br_short(G4_length, G3_t1, Assembler::greaterUnsigned, Assembler::pn, slow_path);
 
           // if we got here then the TLAB allocation failed, so try
           // refilling the TLAB or allocating directly from eden.
@@ -792,13 +782,6 @@
       }
       break;
 
-    case jvmti_exception_throw_id:
-      { // Oexception : exception
-        __ set_info("jvmti_exception_throw", dont_gc_arguments);
-        oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, Runtime1::post_jvmti_exception_throw), I0);
-      }
-      break;
-
     case dtrace_object_alloc_id:
       { // O0: object
         __ set_info("dtrace_object_alloc", dont_gc_arguments);
@@ -844,14 +827,16 @@
         int satb_q_buf_byte_offset =
           in_bytes(JavaThread::satb_mark_queue_offset() +
                    PtrQueue::byte_offset_of_buf());
+
         __ bind(restart);
+        // Load the index into the SATB buffer. PtrQueue::_index is a
+        // size_t so ld_ptr is appropriate
         __ ld_ptr(G2_thread, satb_q_index_byte_offset, tmp);
 
-        __ br_on_reg_cond(Assembler::rc_z, /*annul*/false,
-                          Assembler::pn, tmp, refill);
+        // index == 0?
+        __ cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pn, refill);
 
-        // If the branch is taken, no harm in executing this in the delay slot.
-        __ delayed()->ld_ptr(G2_thread, satb_q_buf_byte_offset, tmp2);
+        __ ld_ptr(G2_thread, satb_q_buf_byte_offset, tmp2);
         __ sub(tmp, oopSize, tmp);
 
         __ st_ptr(pre_val, tmp2, tmp);  // [_buf + index] := <address_of_card>
@@ -911,11 +896,8 @@
         __ set(rs, cardtable);         // cardtable := <card table base>
         __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
 
-        __ br_on_reg_cond(Assembler::rc_nz, /*annul*/false, Assembler::pt,
-                          tmp, not_already_dirty);
-        // Get cardtable + tmp into a reg by itself -- useful in the take-the-branch
-        // case, harmless if not.
-        __ delayed()->add(addr, cardtable, tmp2);
+        assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
+        __ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
 
         // We didn't take the branch, so we're already dirty: return.
         // Use return-from-leaf
@@ -924,6 +906,10 @@
 
         // Not dirty.
         __ bind(not_already_dirty);
+
+        // Get cardtable + tmp into a reg by itself
+        __ add(addr, cardtable, tmp2);
+
         // First, dirty it.
         __ stb(G0, tmp2, 0);  // [cardPtr] := 0  (i.e., dirty).
 
@@ -939,13 +925,17 @@
         int dirty_card_q_buf_byte_offset =
           in_bytes(JavaThread::dirty_card_queue_offset() +
                    PtrQueue::byte_offset_of_buf());
+
         __ bind(restart);
+
+        // Get the index into the update buffer. PtrQueue::_index is
+        // a size_t so ld_ptr is appropriate here.
         __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, tmp3);
 
-        __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pn,
-                          tmp3, refill);
-        // If the branch is taken, no harm in executing this in the delay slot.
-        __ delayed()->ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, tmp4);
+        // index == 0?
+        __ cmp_and_brx_short(tmp3, G0, Assembler::equal,  Assembler::pn, refill);
+
+        __ ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, tmp4);
         __ sub(tmp3, oopSize, tmp3);
 
         __ st_ptr(tmp2, tmp4, tmp3);  // [_buf + index] := <address_of_card>
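
Both G1 barrier hunks above drop br_on_reg_cond in favour of an explicit compare of the queue index against G0, and hoist the buffer load out of the now-absent delay slot. As a rough stand-alone model of the enqueue fast path those instructions implement — the function and variable names below are invented for illustration; only the decrement-and-store arithmetic mirrors the stub:

    #include <cstddef>

    // _index counts down in bytes from the end of the buffer; an index of zero
    // means the buffer is full and the slow refill path must run.
    static bool try_enqueue(void** buf, size_t& index_in_bytes, void* value) {
      if (index_in_bytes == 0) {                        // cmp_and_brx_short(tmp, G0, equal, pn, refill)
        return false;                                   // take the refill path
      }
      index_in_bytes -= sizeof(void*);                  // sub(tmp, oopSize, tmp)
      *(void**)((char*)buf + index_in_bytes) = value;   // st_ptr(value, buf, index)
      return true;                                      // the stub then stores the index back to the thread
    }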
--- a/src/cpu/sparc/vm/copy_sparc.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/sparc/vm/copy_sparc.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -156,9 +156,16 @@
 #endif // _LP64
 }
 
+typedef void (*_zero_Fn)(HeapWord* to, size_t count);
+
 static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value) {
   assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
 
+  if (value == 0 && UseBlockZeroing &&
+      (count > (BlockZeroingLowLimit >> LogHeapWordSize))) {
+   // Call it only when block zeroing is used
+   ((_zero_Fn)StubRoutines::zero_aligned_words())(tohw, count);
+  } else {
    julong* to = (julong*)tohw;
    julong  v  = ((julong)value << 32) | value;
    // If count is odd, odd will be equal to 1 on a 32-bit platform
@@ -176,6 +183,7 @@
      *((juint*)to) = value;
 
    }
+  }
 }
 
 static void pd_fill_to_bytes(void* to, size_t count, jubyte value) {
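
The pd_fill_to_aligned_words change above routes zero fills through the zero_aligned_words stub only for large requests. The shift by LogHeapWordSize suggests BlockZeroingLowLimit is a byte count being converted to heap words before the comparison; a small stand-alone sketch of that guard, with invented parameter names:

    #include <cstddef>

    // Mirrors the condition in the hunk: zero value, feature enabled, and a
    // word count above the byte limit converted to words.
    static bool should_use_zeroing_stub(unsigned value, size_t count_in_words,
                                        bool use_block_zeroing,
                                        size_t low_limit_in_bytes,
                                        int log_heap_word_size) {
      return value == 0
          && use_block_zeroing
          && count_in_words > (low_limit_in_bytes >> log_heap_word_size);
    }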
--- a/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -544,7 +544,7 @@
 
     // Generate regular method entry
     __ bind(slow_path);
-    __ ba(false, fast_accessor_slow_entry_path);
+    __ ba(fast_accessor_slow_entry_path);
     __ delayed()->nop();
     return entry;
   }
@@ -719,8 +719,7 @@
 
     Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
     __ ld_ptr(exception_addr, G3_scratch);
-    __ br_notnull(G3_scratch, false, Assembler::pn, pending_exception_present);
-    __ delayed()->nop();
+    __ br_notnull_short(G3_scratch, Assembler::pn, pending_exception_present);
     __ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::signature_handler_offset())), G3_scratch);
     __ bind(L);
   }
@@ -1292,7 +1291,7 @@
   deopt_frame_manager_return_atos  = __ pc();
 
   // O0/O1 live
-  __ ba(false, return_from_deopt_common);
+  __ ba(return_from_deopt_common);
   __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_OBJECT), L3_scratch);    // Result stub address array index
 
 
@@ -1300,14 +1299,14 @@
   deopt_frame_manager_return_btos  = __ pc();
 
   // O0/O1 live
-  __ ba(false, return_from_deopt_common);
+  __ ba(return_from_deopt_common);
   __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_BOOLEAN), L3_scratch);    // Result stub address array index
 
   // deopt needs to jump to here to enter the interpreter (return a result)
   deopt_frame_manager_return_itos  = __ pc();
 
   // O0/O1 live
-  __ ba(false, return_from_deopt_common);
+  __ ba(return_from_deopt_common);
   __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_INT), L3_scratch);    // Result stub address array index
 
   // deopt needs to jump to here to enter the interpreter (return a result)
@@ -1327,21 +1326,21 @@
   __ srlx(G1,32,O0);
 #endif /* !_LP64 && COMPILER2 */
   // O0/O1 live
-  __ ba(false, return_from_deopt_common);
+  __ ba(return_from_deopt_common);
   __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_LONG), L3_scratch);    // Result stub address array index
 
   // deopt needs to jump to here to enter the interpreter (return a result)
 
   deopt_frame_manager_return_ftos  = __ pc();
   // O0/O1 live
-  __ ba(false, return_from_deopt_common);
+  __ ba(return_from_deopt_common);
   __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_FLOAT), L3_scratch);    // Result stub address array index
 
   // deopt needs to jump to here to enter the interpreter (return a result)
   deopt_frame_manager_return_dtos  = __ pc();
 
   // O0/O1 live
-  __ ba(false, return_from_deopt_common);
+  __ ba(return_from_deopt_common);
   __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_DOUBLE), L3_scratch);    // Result stub address array index
 
   // deopt needs to jump to here to enter the interpreter (return a result)
@@ -1398,7 +1397,7 @@
   __ ld_ptr(STATE(_stack), L1_scratch);                // Get current stack top
   __ sub(L1_scratch, entry_size, L1_scratch);
   __ st_ptr(L1_scratch, STATE(_stack));
-  __ ba(false, entry);
+  __ ba(entry);
   __ delayed()->add(L1_scratch, wordSize, L1_scratch);        // first real entry (undo prepush)
 
   // 2. move expression stack
@@ -1651,7 +1650,7 @@
 
   __ set((int)BytecodeInterpreter::got_monitors, L1_scratch);
   VALIDATE_STATE(G3_scratch, 5);
-  __ ba(false, call_interpreter);
+  __ ba(call_interpreter);
   __ delayed()->st(L1_scratch, STATE(_msg));
 
   // uncommon trap needs to jump to here to enter the interpreter (re-execute current bytecode)
@@ -1659,7 +1658,7 @@
 
   // QQQ what message do we send
 
-  __ ba(false, call_interpreter);
+  __ ba(call_interpreter);
   __ delayed()->ld_ptr(STATE(_frame_bottom), SP);                  // restore to full stack frame
 
   //=============================================================================
@@ -1675,7 +1674,7 @@
   // ready to resume the interpreter
 
   __ set((int)BytecodeInterpreter::deopt_resume, L1_scratch);
-  __ ba(false, call_interpreter);
+  __ ba(call_interpreter);
   __ delayed()->st(L1_scratch, STATE(_msg));
 
   // Current frame has caught an exception we need to dispatch to the
@@ -1763,7 +1762,7 @@
 
   // L1_scratch points to top of stack (prepushed)
 
-  __ ba(false, resume_interpreter);
+  __ ba(resume_interpreter);
   __ delayed()->mov(L1_scratch, O1);
 
   // An exception is being caught on return to a vanilla interpreter frame.
@@ -1773,7 +1772,7 @@
 
   __ ld_ptr(STATE(_frame_bottom), SP);                             // restore to full stack frame
   __ ld_ptr(STATE(_stack_base), O1);                               // empty java expression stack
-  __ ba(false, resume_interpreter);
+  __ ba(resume_interpreter);
   __ delayed()->sub(O1, wordSize, O1);                             // account for prepush
 
   // Return from interpreted method we return result appropriate to the caller (i.e. "recursive"
@@ -1852,7 +1851,7 @@
 
   __ set((int)BytecodeInterpreter::method_resume, L1_scratch);
   __ st(L1_scratch, STATE(_msg));
-  __ ba(false, call_interpreter_2);
+  __ ba(call_interpreter_2);
   __ delayed()->st_ptr(O1, STATE(_stack));
 
 
@@ -1867,8 +1866,8 @@
     __ cmp(Gtmp1, O7);                                                // returning to interpreter?
     __ brx(Assembler::equal, true, Assembler::pt, re_dispatch);       // yep
     __ delayed()->nop();
-    __ ba(false, re_dispatch);
-    __ delayed()->mov(G0, prevState);                                   // initial entry
+    __ ba(re_dispatch);
+    __ delayed()->mov(G0, prevState);                                 // initial entry
 
   }
 
@@ -2031,8 +2030,8 @@
   __ brx(Assembler::zero, false, Assembler::pt, unwind_and_forward);
   __ delayed()->nop();
 
-  __ ld_ptr(STATE(_locals), O1);                                   // get result of popping callee's args
-  __ ba(false, unwind_recursive_activation);
+  __ ld_ptr(STATE(_locals), O1); // get result of popping callee's args
+  __ ba(unwind_recursive_activation);
   __ delayed()->nop();
 
   interpreter_frame_manager = entry_point;
--- a/src/cpu/sparc/vm/frame_sparc.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/sparc/vm/frame_sparc.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -839,3 +839,9 @@
 }
 
 #endif
+
+intptr_t *frame::initial_deoptimization_info() {
+  // unused... but returns fp() to minimize changes introduced by 7087445
+  return fp();
+}
+
--- a/src/cpu/sparc/vm/frame_sparc.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/sparc/vm/frame_sparc.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -259,13 +259,8 @@
   };
 #endif /* CC_INTERP */
 
-  // the compiler frame has many of the same fields as the interpreter frame
-  // %%%%% factor out declarations of the shared fields
   enum compiler_frame_fixed_locals {
-       compiler_frame_d_scratch_fp_offset          = -2,
-       compiler_frame_vm_locals_fp_offset          = -2, // should be same as above
-
-       compiler_frame_vm_local_words = -compiler_frame_vm_locals_fp_offset
+       compiler_frame_vm_locals_fp_offset          = -2
   };
 
  private:
@@ -283,9 +278,6 @@
 
   inline void interpreter_frame_set_tos_address(intptr_t* x);
 
-
-  // %%%%% Another idea: instead of defining 3 fns per item, just define one returning a ref
-
   // monitors:
 
   // next two fns read and write Lmonitors value,
@@ -298,22 +290,8 @@
     return ((interpreterState)sp_at(interpreter_state_ptr_offset));
   }
 
-
 #endif /* CC_INTERP */
 
-
-
- // Compiled frames
-
  public:
-  // Tells if this register can hold 64 bits on V9 (really, V8+).
-  static bool holds_a_doubleword(Register reg) {
-#ifdef _LP64
-    //    return true;
-    return reg->is_out() || reg->is_global();
-#else
-    return reg->is_out() || reg->is_global();
-#endif
-  }
 
 #endif // CPU_SPARC_VM_FRAME_SPARC_HPP
--- a/src/cpu/sparc/vm/interp_masm_sparc.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/sparc/vm/interp_masm_sparc.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -236,17 +236,13 @@
     Label L;
     Register thr_state = G3_scratch;
     ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
-    tst(thr_state);
-    br(zero, false, pt, L); // if (thread->jvmti_thread_state() == NULL) exit;
-    delayed()->nop();
+    br_null_short(thr_state, pt, L); // if (thread->jvmti_thread_state() == NULL) exit;
 
     // Initiate earlyret handling only if it is not already being processed.
     // If the flag has the earlyret_processing bit set, it means that this code
     // is called *during* earlyret handling - we don't want to reenter.
     ld(thr_state, JvmtiThreadState::earlyret_state_offset(), G4_scratch);
-    cmp(G4_scratch, JvmtiThreadState::earlyret_pending);
-    br(Assembler::notEqual, false, pt, L);
-    delayed()->nop();
+    cmp_and_br_short(G4_scratch, JvmtiThreadState::earlyret_pending, Assembler::notEqual, pt, L);
 
     // Call Interpreter::remove_activation_early_entry() to get the address of the
     // same-named entrypoint in the generated interpreter code
@@ -566,9 +562,7 @@
 #ifdef _LP64
   sub(Rtemp, STACK_BIAS, Rtemp);  // Bias Rtemp before cmp to FP
 #endif
-  cmp(Rtemp, FP);
-  brx(Assembler::greaterUnsigned, false, Assembler::pn, Bad);
-  delayed()->nop();
+  cmp_and_brx_short(Rtemp, FP, Assembler::greaterUnsigned, Assembler::pn, Bad);
 
   // Saved SP must not be ridiculously below current SP.
   size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K);
@@ -577,12 +571,9 @@
 #ifdef _LP64
   add(Rtemp, STACK_BIAS, Rtemp);  // Unbias Rtemp before cmp to Rsp
 #endif
-  cmp(Rsp, Rtemp);
-  brx(Assembler::lessUnsigned, false, Assembler::pn, Bad);
-  delayed()->nop();
-
-  br(Assembler::always, false, Assembler::pn, OK);
-  delayed()->nop();
+  cmp_and_brx_short(Rsp, Rtemp, Assembler::lessUnsigned, Assembler::pn, Bad);
+
+  ba_short(OK);
 
   bind(Bad);
   stop("on return to interpreted call, restored SP is corrupted");
@@ -630,8 +621,7 @@
 
     const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
     ld(interp_only, scratch);
-    tst(scratch);
-    br(Assembler::notZero, true, Assembler::pn, skip_compiled_code);
+    cmp_zero_and_br(Assembler::notZero, scratch, skip_compiled_code, true, Assembler::pn);
     delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), target);
     bind(skip_compiled_code);
   }
@@ -641,8 +631,7 @@
 #ifdef ASSERT
   {
     Label ok;
-    br_notnull(target, false, Assembler::pt, ok);
-    delayed()->nop();
+    br_notnull_short(target, Assembler::pt, ok);
     stop("null entry point");
     bind(ok);
   }
@@ -769,6 +758,20 @@
 }
 
 
+void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
+                                                                        Register temp,
+                                                                        Register bytecode,
+                                                                        int byte_no,
+                                                                        int bcp_offset,
+                                                                        size_t index_size) {
+  get_cache_and_index_at_bcp(cache, temp, bcp_offset, index_size);
+  ld_ptr(cache, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset(), bytecode);
+  const int shift_count = (1 + byte_no) * BitsPerByte;
+  srl( bytecode, shift_count, bytecode);
+  and3(bytecode,        0xFF, bytecode);
+}
+
+
 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
                                                                int bcp_offset, size_t index_size) {
   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
@@ -982,8 +985,7 @@
 
   // Don't unlock anything if the _do_not_unlock_if_synchronized flag
   // is set.
-  tstbool(G1_scratch);
-  br(Assembler::notZero, false, pn, no_unlock);
+  cmp_zero_and_br(Assembler::notZero, G1_scratch, no_unlock);
   delayed()->nop();
 
   // BasicObjectLock will be first in list, since this is a synchronized method. However, need
@@ -997,8 +999,7 @@
   add( top_most_monitor(), O1 );
 
   ld_ptr(O1, BasicObjectLock::obj_offset_in_bytes(), G3_scratch);
-  br_notnull(G3_scratch, false, pt, unlock);
-  delayed()->nop();
+  br_notnull_short(G3_scratch, pt, unlock);
 
   if (throw_monitor_exception) {
     // Entry already unlocked need to throw an exception
@@ -1011,8 +1012,7 @@
     if (install_monitor_exception) {
       MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
     }
-    ba(false, unlocked);
-    delayed()->nop();
+    ba_short(unlocked);
   }
 
   bind(unlock);
@@ -1037,15 +1037,13 @@
     add(top_most_monitor(), Rmptr, delta);
     { Label L;
       // ensure that Rmptr starts out above (or at) Rlimit
-      cmp(Rmptr, Rlimit);
-      brx(Assembler::greaterEqualUnsigned, false, pn, L);
-      delayed()->nop();
+      cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);
       stop("monitor stack has negative size");
       bind(L);
     }
     #endif
     bind(restart);
-    ba(false, entry);
+    ba(entry);
     delayed()->
     add(top_most_monitor(), Rmptr, delta);      // points to current entry, starting with bottom-most entry
 
@@ -1061,8 +1059,7 @@
       if (install_monitor_exception) {
         MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
       }
-      ba(false, restart);
-      delayed()->nop();
+      ba_short(restart);
     }
 
     bind(loop);
@@ -1073,9 +1070,7 @@
     #ifdef ASSERT
     { Label L;
       // ensure that Rmptr has not somehow stepped below Rlimit
-      cmp(Rmptr, Rlimit);
-      brx(Assembler::greaterEqualUnsigned, false, pn, L);
-      delayed()->nop();
+      cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);
       stop("ran off the end of the monitor stack");
       bind(L);
     }
@@ -1196,9 +1191,7 @@
       (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
 
     // if the compare and exchange succeeded we are done (we saw an unlocked object)
-    cmp(mark_reg, temp_reg);
-    brx(Assembler::equal, true, Assembler::pt, done);
-    delayed()->nop();
+    cmp_and_brx_short(mark_reg, temp_reg, Assembler::equal, Assembler::pt, done);
 
     // We did not see an unlocked object so try the fast recursive case
 
@@ -1324,13 +1317,7 @@
 
 void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) {
   assert(ProfileInterpreter, "must be profiling interpreter");
-#ifdef _LP64
-  bpr(Assembler::rc_z, false, Assembler::pn, ImethodDataPtr, zero_continue);
-#else
-  tst(ImethodDataPtr);
-  br(Assembler::zero, false, Assembler::pn, zero_continue);
-#endif
-  delayed()->nop();
+  br_null_short(ImethodDataPtr, Assembler::pn, zero_continue);
 }
 
 void InterpreterMacroAssembler::verify_method_data_pointer() {
@@ -1376,31 +1363,18 @@
   Label done;
 
   // if no method data exists, and the counter is high enough, make one
-#ifdef _LP64
-  bpr(Assembler::rc_nz, false, Assembler::pn, ImethodDataPtr, done);
-#else
-  tst(ImethodDataPtr);
-  br(Assembler::notZero, false, Assembler::pn, done);
-#endif
+  br_notnull_short(ImethodDataPtr, Assembler::pn, done);
 
   // Test to see if we should create a method data oop
   AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit);
-#ifdef _LP64
-  delayed()->nop();
   sethi(profile_limit, Rtmp);
-#else
-  delayed()->sethi(profile_limit, Rtmp);
-#endif
   ld(Rtmp, profile_limit.low10(), Rtmp);
-  cmp(invocation_count, Rtmp);
-  br(Assembler::lessUnsigned, false, Assembler::pn, profile_continue);
-  delayed()->nop();
+  cmp_and_br_short(invocation_count, Rtmp, Assembler::lessUnsigned, Assembler::pn, profile_continue);
 
   // Build it now.
   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
   set_method_data_pointer_for_bcp();
-  ba(false, profile_continue);
-  delayed()->nop();
+  ba_short(profile_continue);
   bind(done);
 }
 
@@ -1632,13 +1606,10 @@
     Label skip_receiver_profile;
     if (receiver_can_be_null) {
       Label not_null;
-      tst(receiver);
-      brx(Assembler::notZero, false, Assembler::pt, not_null);
-      delayed()->nop();
+      br_notnull_short(receiver, Assembler::pt, not_null);
       // We are making a call.  Increment the count for null receiver.
       increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
-      ba(false, skip_receiver_profile);
-      delayed()->nop();
+      ba_short(skip_receiver_profile);
       bind(not_null);
     }
 
@@ -1682,8 +1653,7 @@
     // The receiver is receiver[n].  Increment count[n].
     int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
     increment_mdp_data_at(count_offset, scratch);
-    ba(false, done);
-    delayed()->nop();
+    ba_short(done);
     bind(next_test);
 
     if (test_for_null_also) {
@@ -1697,8 +1667,7 @@
           // Receiver did not match any saved receiver and there is no empty row for it.
           // Increment total counter to indicate polymorphic case.
           increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
-          ba(false, done);
-          delayed()->nop();
+          ba_short(done);
           bind(found_null);
         } else {
           brx(Assembler::notZero, false, Assembler::pt, done);
@@ -1729,8 +1698,7 @@
   mov(DataLayout::counter_increment, scratch);
   set_mdp_data_at(count_offset, scratch);
   if (start_row > 0) {
-    ba(false, done);
-    delayed()->nop();
+    ba_short(done);
   }
 }
 
@@ -1772,8 +1740,7 @@
 
       // The method data pointer needs to be updated to reflect the new target.
       update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch);
-      ba(false, profile_continue);
-      delayed()->nop();
+      ba_short(profile_continue);
       bind(next_test);
     }
 
@@ -1922,8 +1889,8 @@
 
     // untested("monitor stack expansion");
     compute_stack_base(Rtemp);
-    ba( false, start_copying );
-    delayed()->cmp( Rtemp, Rlimit); // done? duplicated below
+    ba(start_copying);
+    delayed()->cmp(Rtemp, Rlimit); // done? duplicated below
 
     // note: must copy from low memory upwards
     // On entry to loop,
@@ -2010,9 +1977,7 @@
   // untested("reg area corruption");
   add(Rindex, offset, Rscratch);
   add(Rlimit, 64 + STACK_BIAS, Rscratch1);
-  cmp(Rscratch, Rscratch1);
-  brx(Assembler::greaterEqualUnsigned, false, pn, L);
-  delayed()->nop();
+  cmp_and_brx_short(Rscratch, Rscratch1, Assembler::greaterEqualUnsigned, pn, L);
   stop("regsave area is being clobbered");
   bind(L);
 }
@@ -2174,9 +2139,7 @@
 
   AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit);
   load_contents(limit, Rtmp);
-  cmp(backedge_count, Rtmp);
-  br(Assembler::lessUnsigned, false, Assembler::pt, did_not_overflow);
-  delayed()->nop();
+  cmp_and_br_short(backedge_count, Rtmp, Assembler::lessUnsigned, Assembler::pt, did_not_overflow);
 
   // When ProfileInterpreter is on, the backedge_count comes from the
   // methodDataOop, which value does not get reset on the call to
@@ -2196,15 +2159,11 @@
 
   // Was an OSR adapter generated?
   // O0 = osr nmethod
-  tst(O0);
-  brx(Assembler::zero, false, Assembler::pn, overflow_with_error);
-  delayed()->nop();
+  br_null_short(O0, Assembler::pn, overflow_with_error);
 
   // Has the nmethod been invalidated already?
   ld(O0, nmethod::entry_bci_offset(), O2);
-  cmp(O2, InvalidOSREntryBci);
-  br(Assembler::equal, false, Assembler::pn, overflow_with_error);
-  delayed()->nop();
+  cmp_and_br_short(O2, InvalidOSREntryBci, Assembler::equal, Assembler::pn, overflow_with_error);
 
   // migrate the interpreter frame off of the stack
 
@@ -2270,8 +2229,7 @@
   mov(reg, Rtmp);
   const int log2_bytecode_size_limit = 16;
   srl(Rtmp, log2_bytecode_size_limit, Rtmp);
-  br_notnull( Rtmp, false, pt, test );
-  delayed()->nop();
+  br_notnull_short( Rtmp, pt, test );
 
   // %%% should use call_VM_leaf here?
   save_frame_and_mov(0, Lmethod, O0, reg, O1);
@@ -2320,9 +2278,7 @@
     Register temp_reg = O5;
     const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
     ld(interp_only, temp_reg);
-    tst(temp_reg);
-    br(zero, false, pt, L);
-    delayed()->nop();
+    cmp_and_br_short(temp_reg, 0, equal, pt, L);
     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
     bind(L);
   }
@@ -2372,9 +2328,7 @@
     Register temp_reg = O5;
     const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
     ld(interp_only, temp_reg);
-    tst(temp_reg);
-    br(zero, false, pt, L);
-    delayed()->nop();
+    cmp_and_br_short(temp_reg, 0, equal, pt, L);
 
     // Note: frame::interpreter_frame_result has a dependency on how the
     // method result is saved across the call to post_method_exit. For
--- a/src/cpu/sparc/vm/interp_masm_sparc.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/sparc/vm/interp_masm_sparc.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -189,6 +189,7 @@
                                   setCCOrNot should_set_CC = dont_set_CC );
 
   void get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
+  void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register temp, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2));
   void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
   void get_cache_index_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
 
--- a/src/cpu/sparc/vm/interpreter_sparc.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/sparc/vm/interpreter_sparc.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -191,22 +191,19 @@
     // Optimization, see if there are any more args and get out prior to checking
     // all 16 float registers.  My guess is that this is rare.
    // If is_register is false, then we are done with the first six integer args.
-      __ tst(G4_scratch);
-      __ brx(Assembler::zero, false, Assembler::pt, done);
-      __ delayed()->nop();
-
+      __ br_null_short(G4_scratch, Assembler::pt, done);
     }
-    __ ba(false, NextArg);
+    __ ba(NextArg);
     __ delayed()->srl( G4_scratch, 2, G4_scratch );
 
     __ bind(LoadFloatArg);
     __ ldf( FloatRegisterImpl::S, a, ldarg.as_float_register(), 4);
-    __ ba(false, NextArg);
+    __ ba(NextArg);
     __ delayed()->srl( G4_scratch, 2, G4_scratch );
 
     __ bind(LoadDoubleArg);
     __ ldf( FloatRegisterImpl::D, a, ldarg.as_double_register() );
-    __ ba(false, NextArg);
+    __ ba(NextArg);
     __ delayed()->srl( G4_scratch, 2, G4_scratch );
 
     __ bind(NextArg);
@@ -234,8 +231,7 @@
   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O2, O2, true);
   // returns verified_entry_point or NULL
   // we ignore it in any case
-  __ ba(false, Lcontinue);
-  __ delayed()->nop();
+  __ ba_short(Lcontinue);
 
 }
 
--- a/src/cpu/sparc/vm/methodHandles_sparc.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/sparc/vm/methodHandles_sparc.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -287,9 +287,7 @@
   BLOCK_COMMENT("verify_clean {");
   // Magic numbers must check out:
   __ set((int32_t) MAGIC_NUMBER_1, O7_temp);
-  __ cmp(O7_temp, L0_magic_number_1);
-  __ br(Assembler::equal, false, Assembler::pt, L_ok_1);
-  __ delayed()->nop();
+  __ cmp_and_br_short(O7_temp, L0_magic_number_1, Assembler::equal, Assembler::pt, L_ok_1);
   __ stop("damaged ricochet frame: MAGIC_NUMBER_1 not found");
 
   __ BIND(L_ok_1);
@@ -301,9 +299,7 @@
 #else
   Register FP_temp = FP;
 #endif
-  __ cmp(L4_saved_args_base, FP_temp);
-  __ br(Assembler::greaterEqualUnsigned, false, Assembler::pt, L_ok_2);
-  __ delayed()->nop();
+  __ cmp_and_brx_short(L4_saved_args_base, FP_temp, Assembler::greaterEqualUnsigned, Assembler::pt, L_ok_2);
   __ stop("damaged ricochet frame: L4 < FP");
 
   __ BIND(L_ok_2);
@@ -316,15 +312,11 @@
 
   __ BIND(L_ok_3);
   extract_conversion_dest_type(_masm, L5_conversion, O7_temp);
-  __ cmp(O7_temp, T_VOID);
-  __ br(Assembler::equal, false, Assembler::pt, L_ok_4);
-  __ delayed()->nop();
+  __ cmp_and_br_short(O7_temp, T_VOID, Assembler::equal, Assembler::pt, L_ok_4);
   extract_conversion_vminfo(_masm, L5_conversion, O5_temp);
   __ ld_ptr(L4_saved_args_base, __ argument_offset(O5_temp, O5_temp), O7_temp);
   assert(__ is_simm13(RETURN_VALUE_PLACEHOLDER), "must be simm13");
-  __ cmp(O7_temp, (int32_t) RETURN_VALUE_PLACEHOLDER);
-  __ brx(Assembler::equal, false, Assembler::pt, L_ok_4);
-  __ delayed()->nop();
+  __ cmp_and_brx_short(O7_temp, (int32_t) RETURN_VALUE_PLACEHOLDER, Assembler::equal, Assembler::pt, L_ok_4);
   __ stop("damaged ricochet frame: RETURN_VALUE_PLACEHOLDER not found");
   __ BIND(L_ok_4);
   BLOCK_COMMENT("} verify_clean");
@@ -363,9 +355,7 @@
   if (VerifyMethodHandles) {
     Label L_ok, L_bad;
     int32_t stack_move_limit = 0x0800;  // extra-large
-    __ cmp(stack_move_reg, stack_move_limit);
-    __ br(Assembler::greaterEqual, false, Assembler::pn, L_bad);
-    __ delayed()->nop();
+    __ cmp_and_br_short(stack_move_reg, stack_move_limit, Assembler::greaterEqual, Assembler::pn, L_bad);
     __ cmp(stack_move_reg, -stack_move_limit);
     __ br(Assembler::greater, false, Assembler::pt, L_ok);
     __ delayed()->nop();
@@ -401,13 +391,9 @@
   // Verify that argslot lies within (Gargs, FP].
   Label L_ok, L_bad;
   BLOCK_COMMENT("verify_argslot {");
+  __ cmp_and_brx_short(Gargs, argslot_reg, Assembler::greaterUnsigned, Assembler::pn, L_bad);
   __ add(FP, STACK_BIAS, temp_reg);  // STACK_BIAS is zero on !_LP64
-  __ cmp(argslot_reg, temp_reg);
-  __ brx(Assembler::greaterUnsigned, false, Assembler::pn, L_bad);
-  __ delayed()->nop();
-  __ cmp(Gargs, argslot_reg);
-  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
-  __ delayed()->nop();
+  __ cmp_and_brx_short(argslot_reg, temp_reg, Assembler::lessEqualUnsigned, Assembler::pt, L_ok);
   __ BIND(L_bad);
   __ stop(error_message);
   __ BIND(L_ok);
@@ -434,14 +420,10 @@
   }
   __ add(arg_slot_base_reg, __ argument_offset(arg_slots, temp_reg), temp_reg);
   __ add(FP, STACK_BIAS, temp2_reg);  // STACK_BIAS is zero on !_LP64
-  __ cmp(temp_reg, temp2_reg);
-  __ brx(Assembler::greaterUnsigned, false, Assembler::pn, L_bad);
-  __ delayed()->nop();
+  __ cmp_and_brx_short(temp_reg, temp2_reg, Assembler::greaterUnsigned, Assembler::pn, L_bad);
   // Gargs points to the first word so adjust by BytesPerWord
   __ add(arg_slot_base_reg, BytesPerWord, temp_reg);
-  __ cmp(Gargs, temp_reg);
-  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
-  __ delayed()->nop();
+  __ cmp_and_brx_short(Gargs, temp_reg, Assembler::lessEqualUnsigned, Assembler::pt, L_ok);
   __ BIND(L_bad);
   __ stop(error_message);
   __ BIND(L_ok);
@@ -502,21 +484,16 @@
   Label L_ok, L_bad;
   BLOCK_COMMENT("verify_klass {");
   __ verify_oop(obj_reg);
-  __ br_null(obj_reg, false, Assembler::pn, L_bad);
-  __ delayed()->nop();
+  __ br_null_short(obj_reg, Assembler::pn, L_bad);
   __ load_klass(obj_reg, temp_reg);
   __ set(ExternalAddress(klass_addr), temp2_reg);
   __ ld_ptr(Address(temp2_reg, 0), temp2_reg);
-  __ cmp(temp_reg, temp2_reg);
-  __ brx(Assembler::equal, false, Assembler::pt, L_ok);
-  __ delayed()->nop();
+  __ cmp_and_brx_short(temp_reg, temp2_reg, Assembler::equal, Assembler::pt, L_ok);
   intptr_t super_check_offset = klass->super_check_offset();
   __ ld_ptr(Address(temp_reg, super_check_offset), temp_reg);
   __ set(ExternalAddress(klass_addr), temp2_reg);
   __ ld_ptr(Address(temp2_reg, 0), temp2_reg);
-  __ cmp(temp_reg, temp2_reg);
-  __ brx(Assembler::equal, false, Assembler::pt, L_ok);
-  __ delayed()->nop();
+  __ cmp_and_brx_short(temp_reg, temp2_reg, Assembler::equal, Assembler::pt, L_ok);
   __ BIND(L_bad);
   __ stop(error_message);
   __ BIND(L_ok);
@@ -524,6 +501,30 @@
 }
 #endif // ASSERT
 
+
+void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp) {
+  assert(method == G5_method, "interpreter calling convention");
+  __ verify_oop(method);
+  __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_interpreted_offset()), target);
+  if (JvmtiExport::can_post_interpreter_events()) {
+    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
+    // compiled code in threads for which the event is enabled.  Check here for
+    // interp_only_mode if these events CAN be enabled.
+    __ verify_thread();
+    Label skip_compiled_code;
+
+    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
+    __ ld(interp_only, temp);
+    __ tst(temp);
+    __ br(Assembler::notZero, true, Assembler::pn, skip_compiled_code);
+    __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), target);
+    __ bind(skip_compiled_code);
+  }
+  __ jmp(target, 0);
+  __ delayed()->nop();
+}
+
+
 // Code generation
 address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
   // I5_savedSP/O5_savedSP: sender SP (must preserve)
@@ -647,9 +648,7 @@
 #ifdef ASSERT
     {
       Label L_ok;
-      __ cmp(arg_slots.as_register(), 0);
-      __ br(Assembler::greaterEqual, false, Assembler::pt, L_ok);
-      __ delayed()->nop();
+      __ cmp_and_br_short(arg_slots.as_register(), 0, Assembler::greaterEqual, Assembler::pt, L_ok);
       __ stop("negative arg_slots");
       __ bind(L_ok);
     }
@@ -724,9 +723,7 @@
     __ ld_ptr(           Address(temp_reg, 0     ), temp2_reg);
     __ st_ptr(temp2_reg, Address(temp_reg, offset)           );
     __ add(temp_reg, wordSize, temp_reg);
-    __ cmp(temp_reg, argslot_reg);
-    __ brx(Assembler::lessUnsigned, false, Assembler::pt, loop);
-    __ delayed()->nop();  // FILLME
+    __ cmp_and_brx_short(temp_reg, argslot_reg, Assembler::lessUnsigned, Assembler::pt, loop);
   }
 
   // Now move the argslot down, to point to the opened-up space.
@@ -773,9 +770,7 @@
     __ ld_ptr(           Address(temp_reg, 0     ), temp2_reg);
     __ st_ptr(temp2_reg, Address(temp_reg, offset)           );
     __ sub(temp_reg, wordSize, temp_reg);
-    __ cmp(temp_reg, Gargs);
-    __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, L_loop);
-    __ delayed()->nop();  // FILLME
+    __ cmp_and_brx_short(temp_reg, Gargs, Assembler::greaterEqualUnsigned, Assembler::pt, L_loop);
   }
 
   // And adjust the argslot address to point at the deletion point.
@@ -824,8 +819,7 @@
     __ delayed()->nop();
     __ ld_ptr(          Address(argslot_reg, 0), temp_reg);
     __ st_ptr(temp_reg, Address(Gargs,       0));
-    __ ba(false, L_break);
-    __ delayed()->nop();  // FILLME
+    __ ba_short(L_break);
     __ BIND(L_plural);
 
     // Loop for 2 or more:
@@ -839,9 +833,7 @@
     __ sub(Gargs,   wordSize, Gargs  );
     __ ld_ptr(           Address(top_reg, 0), temp2_reg);
     __ st_ptr(temp2_reg, Address(Gargs,   0));
-    __ cmp(top_reg, argslot_reg);
-    __ brx(Assembler::greaterUnsigned, false, Assembler::pt, L_loop);
-    __ delayed()->nop();  // FILLME
+    __ cmp_and_brx_short(top_reg, argslot_reg, Assembler::greaterUnsigned, Assembler::pt, L_loop);
     __ BIND(L_break);
   }
   BLOCK_COMMENT("} push_arg_slots");
@@ -873,17 +865,13 @@
       __ br(Assembler::lessEqual, false, Assembler::pn, L_bad);
       __ delayed()->nop();
     }
-    __ cmp(bottom_reg, top_reg);
-    __ brx(Assembler::lessUnsigned, false, Assembler::pt, L_ok);
-    __ delayed()->nop();
+    __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::lessUnsigned, Assembler::pt, L_ok);
     __ BIND(L_bad);
     __ stop("valid bounds (copy up)");
     __ BIND(L_ok);
   }
 #endif
-  __ cmp(bottom_reg, top_reg);
-  __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pn, L_break);
-  __ delayed()->nop();
+  __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::greaterEqualUnsigned, Assembler::pn, L_break);
   // work top down to bottom, copying contiguous data upwards
   // In pseudo-code:
   //   while (--top >= bottom) *(top + distance) = *(top + 0);
@@ -892,9 +880,7 @@
   __ sub(top_reg, wordSize, top_reg);
   __ ld_ptr(           Address(top_reg, 0     ), temp2_reg);
   __ st_ptr(temp2_reg, Address(top_reg, offset)           );
-  __ cmp(top_reg, bottom_reg);
-  __ brx(Assembler::greaterUnsigned, false, Assembler::pt, L_loop);
-  __ delayed()->nop();  // FILLME
+  __ cmp_and_brx_short(top_reg, bottom_reg, Assembler::greaterUnsigned, Assembler::pt, L_loop);
   assert(Interpreter::stackElementSize == wordSize, "else change loop");
   __ BIND(L_break);
   BLOCK_COMMENT("} move_arg_slots_up");
@@ -927,17 +913,13 @@
       __ br(Assembler::greaterEqual, false, Assembler::pn, L_bad);
       __ delayed()->nop();
     }
-    __ cmp(bottom_reg, top_reg);
-    __ brx(Assembler::lessUnsigned, false, Assembler::pt, L_ok);
-    __ delayed()->nop();
+    __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::lessUnsigned, Assembler::pt, L_ok);
     __ BIND(L_bad);
     __ stop("valid bounds (copy down)");
     __ BIND(L_ok);
   }
 #endif
-  __ cmp(bottom_reg, top_reg);
-  __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pn, L_break);
-  __ delayed()->nop();
+  __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::greaterEqualUnsigned, Assembler::pn, L_break);
   // work bottom up to top, copying contiguous data downwards
   // In pseudo-code:
   //   while (bottom < top) *(bottom - distance) = *(bottom + 0), bottom++;
@@ -946,9 +928,7 @@
   __ ld_ptr(           Address(bottom_reg, 0     ), temp2_reg);
   __ st_ptr(temp2_reg, Address(bottom_reg, offset)           );
   __ add(bottom_reg, wordSize, bottom_reg);
-  __ cmp(bottom_reg, top_reg);
-  __ brx(Assembler::lessUnsigned, false, Assembler::pt, L_loop);
-  __ delayed()->nop();  // FILLME
+  __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::lessUnsigned, Assembler::pt, L_loop);
   assert(Interpreter::stackElementSize == wordSize, "else change loop");
   __ BIND(L_break);
   BLOCK_COMMENT("} move_arg_slots_down");
@@ -1105,9 +1085,6 @@
   guarantee(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");
 
   // Some handy addresses:
-  Address G5_method_fie(    G5_method,        in_bytes(methodOopDesc::from_interpreted_offset()));
-  Address G5_method_fce(    G5_method,        in_bytes(methodOopDesc::from_compiled_offset()));
-
   Address G3_mh_vmtarget(   G3_method_handle, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes());
 
   Address G3_dmh_vmindex(   G3_method_handle, java_lang_invoke_DirectMethodHandle::vmindex_offset_in_bytes());
@@ -1136,24 +1113,23 @@
   case _raise_exception:
     {
       // Not a real MH entry, but rather shared code for raising an
-      // exception.  Since we use the compiled entry, arguments are
-      // expected in compiler argument registers.
+      // exception.  For sharing purposes the arguments are passed in registers
+      // and then placed according to the interpreter calling convention here.
       assert(raise_exception_method(), "must be set");
       assert(raise_exception_method()->from_compiled_entry(), "method must be linked");
 
-      __ mov(O5_savedSP, SP);  // Cut the stack back to where the caller started.
-
-      Label L_no_method;
-      // FIXME: fill in _raise_exception_method with a suitable java.lang.invoke method
       __ set(AddressLiteral((address) &_raise_exception_method), G5_method);
       __ ld_ptr(Address(G5_method, 0), G5_method);
 
       const int jobject_oop_offset = 0;
       __ ld_ptr(Address(G5_method, jobject_oop_offset), G5_method);
 
-      __ verify_oop(G5_method);
-      __ jump_indirect_to(G5_method_fce, O3_scratch);  // jump to compiled entry
-      __ delayed()->nop();
+      adjust_SP_and_Gargs_down_by_slots(_masm, 3, noreg, noreg);
+
+      __ st    (O0_code,     __ argument_address(constant(2), noreg, 0));
+      __ st_ptr(O1_actual,   __ argument_address(constant(1), noreg, 0));
+      __ st_ptr(O2_required, __ argument_address(constant(0), noreg, 0));
+      jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch);
     }
     break;
 
@@ -1161,7 +1137,6 @@
   case _invokespecial_mh:
     {
       __ load_heap_oop(G3_mh_vmtarget, G5_method);  // target is a methodOop
-      __ verify_oop(G5_method);
       // Same as TemplateTable::invokestatic or invokespecial,
       // minus the CP setup and profiling:
       if (ek == _invokespecial_mh) {
@@ -1171,8 +1146,7 @@
         __ null_check(G3_method_handle);
         __ verify_oop(G3_method_handle);
       }
-      __ jump_indirect_to(G5_method_fie, O1_scratch);
-      __ delayed()->nop();
+      jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch);
     }
     break;
 
@@ -1204,9 +1178,7 @@
       Address vtable_entry_addr(O0_klass, base + vtableEntry::method_offset_in_bytes());
       __ ld_ptr(vtable_entry_addr, G5_method);
 
-      __ verify_oop(G5_method);
-      __ jump_indirect_to(G5_method_fie, O1_scratch);
-      __ delayed()->nop();
+      jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch);
     }
     break;
 
@@ -1237,9 +1209,7 @@
                                  O3_scratch,
                                  no_such_interface);
 
-      __ verify_oop(G5_method);
-      __ jump_indirect_to(G5_method_fie, O1_scratch);
-      __ delayed()->nop();
+      jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch);
 
       __ bind(no_such_interface);
       // Throw an exception.
@@ -1283,9 +1253,7 @@
 
       if (direct_to_method) {
         __ load_heap_oop(G3_mh_vmtarget, G5_method);  // target is a methodOop
-        __ verify_oop(G5_method);
-        __ jump_indirect_to(G5_method_fie, O1_scratch);
-        __ delayed()->nop();
+        jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch);
       } else {
         __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);  // target is a methodOop
         __ verify_oop(G3_method_handle);
@@ -1294,6 +1262,15 @@
     }
     break;
 
+  case _adapter_opt_profiling:
+    if (java_lang_invoke_CountingMethodHandle::vmcount_offset_in_bytes() != 0) {
+      Address G3_mh_vmcount(G3_method_handle, java_lang_invoke_CountingMethodHandle::vmcount_offset_in_bytes());
+      __ ld(G3_mh_vmcount, O1_scratch);
+      __ add(O1_scratch, 1, O1_scratch);
+      __ st(O1_scratch, G3_mh_vmcount);
+    }
+    // fall through
+
   case _adapter_retype_only:
   case _adapter_retype_raw:
     // Immediately jump to the next MH layer:
@@ -1317,9 +1294,7 @@
 
       Label L_done;
       __ ld_ptr(vmarg, O2_scratch);
-      __ tst(O2_scratch);
-      __ brx(Assembler::zero, false, Assembler::pn, L_done);  // No cast if null.
-      __ delayed()->nop();
+      __ br_null_short(O2_scratch, Assembler::pn, L_done);  // No cast if null.
       __ load_klass(O2_scratch, O2_scratch);
 
       // Live at this point:
@@ -1424,8 +1399,7 @@
 
       // this path is taken for int->byte, int->short
       __ sra(O1_scratch, G5_vminfo, O1_scratch);
-      __ ba(false, done);
-      __ delayed()->nop();
+      __ ba_short(done);
 
       __ bind(zero_extend);
       // this is taken for int->char
@@ -1848,9 +1822,7 @@
           BLOCK_COMMENT("verify collect_count_constant {");
           __ load_method_handle_vmslots(O3_scratch, G3_method_handle, O2_scratch);
           Label L_count_ok;
-          __ cmp(O3_scratch, collect_count_constant);
-          __ br(Assembler::equal, false, Assembler::pt, L_count_ok);
-          __ delayed()->nop();
+          __ cmp_and_br_short(O3_scratch, collect_count_constant, Assembler::equal, Assembler::pt, L_count_ok);
           __ stop("bad vminfo in AMH.conv");
           __ BIND(L_count_ok);
           BLOCK_COMMENT("} verify collect_count_constant");
@@ -1897,9 +1869,7 @@
           BLOCK_COMMENT("verify dest_slot_constant {");
           extract_conversion_vminfo(_masm, RicochetFrame::L5_conversion, O3_scratch);
           Label L_vminfo_ok;
-          __ cmp(O3_scratch, dest_slot_constant);
-          __ br(Assembler::equal, false, Assembler::pt, L_vminfo_ok);
-          __ delayed()->nop();
+          __ cmp_and_br_short(O3_scratch, dest_slot_constant, Assembler::equal, Assembler::pt, L_vminfo_ok);
           __ stop("bad vminfo in AMH.conv");
           __ BIND(L_vminfo_ok);
           BLOCK_COMMENT("} verify dest_slot_constant");
@@ -1939,14 +1909,10 @@
       // If there are variable parameters, use dynamic checks to skip around the whole mess.
       Label L_done;
       if (keep3_count.is_register()) {
-        __ tst(keep3_count.as_register());
-        __ br(Assembler::zero, false, Assembler::pn, L_done);
-        __ delayed()->nop();
+        __ cmp_and_br_short(keep3_count.as_register(), 0, Assembler::equal, Assembler::pn, L_done);
       }
       if (close_count.is_register()) {
-        __ cmp(close_count.as_register(), open_count);
-        __ br(Assembler::equal, false, Assembler::pn, L_done);
-        __ delayed()->nop();
+        __ cmp_and_br_short(close_count.as_register(), open_count, Assembler::equal, Assembler::pn, L_done);
       }
 
       if (move_keep3 && fix_arg_base) {
@@ -1987,8 +1953,7 @@
         }
 
         if (emit_guard) {
-          __ ba(false, L_done);  // assumes emit_move_up is true also
-          __ delayed()->nop();
+          __ ba_short(L_done);  // assumes emit_move_up is true also
           __ BIND(L_move_up);
         }
 
@@ -2121,8 +2086,7 @@
 
 #ifdef ASSERT
       { Label L_ok;
-        __ br_notnull(O7_temp, false, Assembler::pt, L_ok);
-        __ delayed()->nop();
+        __ br_notnull_short(O7_temp, Assembler::pt, L_ok);
         __ stop("bad method handle return");
         __ BIND(L_ok);
       }
@@ -2180,11 +2144,10 @@
         Label L_skip;
         if (length_constant < 0) {
           load_conversion_vminfo(_masm, G3_amh_conversion, O3_scratch);
-          __ br_zero(Assembler::notZero, false, Assembler::pn, O3_scratch, L_skip);
-          __ delayed()->nop();
+          __ cmp_zero_and_br(Assembler::notZero, O3_scratch, L_skip);
+          __ delayed()->nop(); // to avoid back-to-back cbcond instructions
         }
-        __ br_null(O1_array, false, Assembler::pn, L_array_is_empty);
-        __ delayed()->nop();
+        __ br_null_short(O1_array, Assembler::pn, L_array_is_empty);
         __ BIND(L_skip);
       }
       __ null_check(O1_array, oopDesc::klass_offset_in_bytes());
@@ -2198,8 +2161,7 @@
       Label L_ok_array_klass, L_bad_array_klass, L_bad_array_length;
       __ check_klass_subtype(O2_array_klass, O3_klass, O4_scratch, G5_scratch, L_ok_array_klass);
       // If we get here, the type check failed!
-      __ ba(false, L_bad_array_klass);
-      __ delayed()->nop();
+      __ ba_short(L_bad_array_klass);
       __ BIND(L_ok_array_klass);
 
       // Check length.
@@ -2235,8 +2197,7 @@
         __ BIND(L_array_is_empty);
         remove_arg_slots(_masm, -stack_move_unit() * array_slots,
                          O0_argslot, O1_scratch, O2_scratch, O3_scratch);
-        __ ba(false, L_args_done);  // no spreading to do
-        __ delayed()->nop();
+        __ ba_short(L_args_done);  // no spreading to do
         __ BIND(L_insert_arg_space);
         // come here in the usual case, stack_move < 0 (2 or more spread arguments)
         // Live: O1_array, O2_argslot_limit, O3_stack_move
@@ -2277,9 +2238,7 @@
                        Address(O1_source, 0), Address(O4_fill_ptr, 0),
                        O2_scratch);  // must be an even register for !_LP64 long moves (uses O2/O3)
         __ add(O1_source, type2aelembytes(elem_type), O1_source);
-        __ cmp(O4_fill_ptr, O0_argslot);
-        __ brx(Assembler::greaterUnsigned, false, Assembler::pt, L_loop);
-        __ delayed()->nop();  // FILLME
+        __ cmp_and_brx_short(O4_fill_ptr, O0_argslot, Assembler::greaterUnsigned, Assembler::pt, L_loop);
       } else if (length_constant == 0) {
         // nothing to copy
       } else {
--- a/src/cpu/sparc/vm/methodHandles_sparc.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/sparc/vm/methodHandles_sparc.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -221,4 +221,8 @@
                  "reference is a MH");
   }
 
+  // Similar to InterpreterMacroAssembler::jump_from_interpreted.
+  // Takes care of special dispatch from single stepping too.
+  static void jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp, Register temp2);
+
   static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
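
jump_from_method_handle, declared above and defined in the methodHandles_sparc.cpp hunk, normally dispatches through the method's from_interpreted entry but falls back to the interpreter entry when the thread is in interp_only_mode (JVMTI single stepping and friends). A hypothetical C-level paraphrase of that selection; the names below are invented, only the branching structure follows the generated code:

    typedef void (*entry_fn)();

    static entry_fn select_dispatch_target(entry_fn from_interpreted_entry,
                                           entry_fn interpreter_entry,
                                           bool jvmti_events_possible,
                                           int interp_only_mode) {
      entry_fn target = from_interpreted_entry;              // ld_ptr(from_interpreted_offset)
      if (jvmti_events_possible && interp_only_mode != 0) {  // tst(temp); br(notZero, ...)
        target = interpreter_entry;                          // delay-slot ld_ptr when taken
      }
      return target;                                         // jmp(target, 0); delayed()->nop()
    }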
--- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -600,7 +600,7 @@
 void AdapterGenerator::patch_callers_callsite() {
   Label L;
   __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
-  __ br_null(G3_scratch, false, __ pt, L);
+  __ br_null(G3_scratch, false, Assembler::pt, L);
   // Schedule the branch target address early.
   __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
   // Call into the VM to patch the caller, then jump to compiled callee
@@ -1127,8 +1127,7 @@
       Label loop;
       __ bind(loop);
       __ sub(L0, 1, L0);
-      __ br_null(L0, false, Assembler::pt, loop);
-      __ delayed()->nop();
+      __ br_null_short(L0, Assembler::pt, loop);
 
       __ restore();
     }
@@ -1202,7 +1201,7 @@
     // the call site corrected.
     __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
     __ bind(ok2);
-    __ br_null(G3_scratch, false, __ pt, skip_fixup);
+    __ br_null(G3_scratch, false, Assembler::pt, skip_fixup);
     __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
     __ jump_to(ic_miss, G3_scratch);
     __ delayed()->nop();
@@ -1779,9 +1778,7 @@
     AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
     __ verify_oop(O0);
     __ load_klass(O0, temp_reg);
-    __ cmp(temp_reg, G5_inline_cache_reg);
-    __ brx(Assembler::equal, true, Assembler::pt, L);
-    __ delayed()->nop();
+    __ cmp_and_brx_short(temp_reg, G5_inline_cache_reg, Assembler::equal, Assembler::pt, L);
 
     __ jump_to(ic_miss, temp_reg);
     __ delayed()->nop();
@@ -2182,8 +2179,7 @@
 #ifdef ASSERT
     { Label L;
     __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
-    __ br_null(O0, false, Assembler::pt, L);
-    __ delayed()->nop();
+    __ br_null_short(O0, Assembler::pt, L);
     __ stop("no pending exception allowed on exit from IR::monitorenter");
     __ bind(L);
     }
@@ -2298,9 +2294,7 @@
     Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
     __ br(Assembler::notEqual, false, Assembler::pn, L);
     __ delayed()->ld(suspend_state, G3_scratch);
-    __ cmp(G3_scratch, 0);
-    __ br(Assembler::equal, false, Assembler::pt, no_block);
-    __ delayed()->nop();
+    __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
     __ bind(L);
 
     // Block.  Save any potential method result value before the operation and
@@ -2328,9 +2322,7 @@
 
   Label no_reguard;
   __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
-  __ cmp(G3_scratch, JavaThread::stack_guard_yellow_disabled);
-  __ br(Assembler::notEqual, false, Assembler::pt, no_reguard);
-  __ delayed()->nop();
+  __ cmp_and_br_short(G3_scratch, JavaThread::stack_guard_yellow_disabled, Assembler::notEqual, Assembler::pt, no_reguard);
 
     save_native_result(masm, ret_type, stack_slots);
   __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
@@ -2382,8 +2374,7 @@
 #ifdef ASSERT
     { Label L;
     __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
-    __ br_null(O0, false, Assembler::pt, L);
-    __ delayed()->nop();
+    __ br_null_short(O0, Assembler::pt, L);
     __ stop("no pending exception allowed on exit from IR::monitorexit");
     __ bind(L);
     }
@@ -2639,9 +2630,7 @@
     AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
     __ verify_oop(O0);
     __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg);
-    __ cmp(temp_reg, G5_inline_cache_reg);
-    __ brx(Assembler::equal, true, Assembler::pt, L);
-    __ delayed()->nop();
+    __ cmp_and_brx_short(temp_reg, G5_inline_cache_reg, Assembler::equal, Assembler::pt, L);
 
     __ jump_to(ic_miss, temp_reg);
     __ delayed()->nop();
@@ -3143,8 +3132,7 @@
 
   gen_new_frame(masm, deopt);        // allocate an interpreter frame
 
-  __ tst(O4array_size);
-  __ br(Assembler::notZero, false, Assembler::pn, loop);
+  __ cmp_zero_and_br(Assembler::notZero, O4array_size, loop);
   __ delayed()->add(O3array, wordSize, O3array);
   __ ld_ptr(G3pcs, 0, O7);                      // load final frame new pc
 
@@ -3221,7 +3209,7 @@
   // pc is now in O7. Return values are still in the expected places
 
   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
-  __ ba(false, cont);
+  __ ba(cont);
   __ delayed()->mov(Deoptimization::Unpack_deopt, L0deopt_mode);
 
   int exception_offset = __ offset() - start;
@@ -3256,8 +3244,7 @@
     // verify that there is really an exception oop in exception_oop
     Label has_exception;
     __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception);
-    __ br_notnull(Oexception, false, Assembler::pt, has_exception);
-    __ delayed()-> nop();
+    __ br_notnull_short(Oexception, Assembler::pt, has_exception);
     __ stop("no exception in thread");
     __ bind(has_exception);
 
@@ -3265,14 +3252,13 @@
     Label no_pending_exception;
     Address exception_addr(G2_thread, Thread::pending_exception_offset());
     __ ld_ptr(exception_addr, Oexception);
-    __ br_null(Oexception, false, Assembler::pt, no_pending_exception);
-    __ delayed()->nop();
+    __ br_null_short(Oexception, Assembler::pt, no_pending_exception);
     __ stop("must not have pending exception here");
     __ bind(no_pending_exception);
   }
 #endif
 
-  __ ba(false, cont);
+  __ ba(cont);
+  __ delayed()->mov(Deoptimization::Unpack_exception, L0deopt_mode);
 
   //
@@ -3313,9 +3299,7 @@
   RegisterSaver::restore_result_registers(masm);
 
   Label noException;
-  __ cmp(G4deopt_mode, Deoptimization::Unpack_exception);   // Was exception pending?
-  __ br(Assembler::notEqual, false, Assembler::pt, noException);
-  __ delayed()->nop();
+  __ cmp_and_br_short(G4deopt_mode, Deoptimization::Unpack_exception, Assembler::notEqual, Assembler::pt, noException);
 
   // Move the pending exception from exception_oop to Oexception so
   // the pending exception will be picked up the interpreter.
@@ -3359,9 +3343,7 @@
   // In 32 bit, C2 returns longs in G1 so restore the saved G1 into
   // I0/I1 if the return value is long.
   Label not_long;
-  __ cmp(O0,T_LONG);
-  __ br(Assembler::notEqual, false, Assembler::pt, not_long);
-  __ delayed()->nop();
+  __ cmp_and_br_short(O0,T_LONG, Assembler::notEqual, Assembler::pt, not_long);
   __ ldd(saved_Greturn1_addr,I0);
   __ bind(not_long);
 #endif
@@ -3534,9 +3516,7 @@
   Label pending;
 
   __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
-  __ tst(O1);
-  __ brx(Assembler::notEqual, true, Assembler::pn, pending);
-  __ delayed()->nop();
+  __ br_notnull_short(O1, Assembler::pn, pending);
 
   RegisterSaver::restore_live_registers(masm);
 
@@ -3623,9 +3603,7 @@
   Label pending;
 
   __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
-  __ tst(O1);
-  __ brx(Assembler::notEqual, true, Assembler::pn, pending);
-  __ delayed()->nop();
+  __ br_notnull_short(O1, Assembler::pn, pending);
 
   // get the returned methodOop
 
--- a/src/cpu/sparc/vm/sparc.ad	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/sparc/vm/sparc.ad	Thu Dec 22 15:46:11 2011 +0000
@@ -425,7 +425,7 @@
 // but they are used with the "Op_RegD" type, and always occur in even/odd pairs.
 // This class is usable for mis-aligned loads as happen in I2C adapters.
 reg_class dflt_low_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,
-                   R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29,R_F30,R_F31 );
+                   R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29);
 %}
 
 //----------DEFINITION BLOCK---------------------------------------------------
@@ -460,6 +460,8 @@
 // Must be visible to the DFA in dfa_sparc.cpp
 extern bool can_branch_register( Node *bol, Node *cmp );
 
+extern bool use_block_zeroing(Node* count);
+
 // Macros to extract hi & lo halves from a long pair.
 // G0 is not part of any long pair, so assert on that.
 // Prevents accidentally using G1 instead of G0.
@@ -471,9 +473,6 @@
 source %{
 #define __ _masm.
 
-// Block initializing store
-#define ASI_BLK_INIT_QUAD_LDD_P    0xE2
-
 // tertiary op of a LoadP or StoreP encoding
 #define REGP_OP true
 
@@ -524,6 +523,12 @@
   return false;
 }
 
+bool use_block_zeroing(Node* count) {
+  // Use BIS for zeroing if count is not constant
+  // or it is >= BlockZeroingLowLimit.
+  return UseBlockZeroing && (count->find_intptr_t_con(BlockZeroingLowLimit) >= BlockZeroingLowLimit);
+}
+
 // ****************************************************************************
 
 // REQUIRED FUNCTIONALITY
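
The new use_block_zeroing() helper above folds the two conditions in its comment into a single comparison: find_intptr_t_con(default) returns the supplied default when the count node is not a compile-time constant, so a non-constant count compares as >= BlockZeroingLowLimit and also selects the BIS path. A minimal standalone sketch of that trick, with illustrative values and a stand-in for the HotSpot node API:

#include <cassert>
#include <cstdint>

// Illustrative stand-ins; the real flag and limit live in HotSpot's globals.
static const intptr_t BlockZeroingLowLimit = 2048;
static const bool     UseBlockZeroing      = true;

// Stand-in for Node::find_intptr_t_con(value_if_unknown): returns the
// constant if the node is a known constant, otherwise the supplied default.
static intptr_t find_intptr_t_con(bool is_con, intptr_t con, intptr_t value_if_unknown) {
  return is_con ? con : value_if_unknown;
}

static bool use_block_zeroing_sketch(bool count_is_con, intptr_t count_con) {
  // A non-constant count sees the default (== BlockZeroingLowLimit) and passes.
  intptr_t c = find_intptr_t_con(count_is_con, count_con, BlockZeroingLowLimit);
  return UseBlockZeroing && (c >= BlockZeroingLowLimit);
}

int main() {
  assert( use_block_zeroing_sketch(false, 0));     // unknown count  -> BIS zeroing
  assert( use_block_zeroing_sketch(true,  4096));  // large constant -> BIS zeroing
  assert(!use_block_zeroing_sketch(true,  64));    // small constant -> clear_array loop
  return 0;
}
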
@@ -835,6 +840,7 @@
           !(n->ideal_Opcode()==Op_ConvI2D   && ld_op==Op_LoadF) &&
           !(n->ideal_Opcode()==Op_PrefetchRead  && ld_op==Op_LoadI) &&
           !(n->ideal_Opcode()==Op_PrefetchWrite && ld_op==Op_LoadI) &&
+          !(n->ideal_Opcode()==Op_PrefetchAllocation && ld_op==Op_LoadI) &&
           !(n->ideal_Opcode()==Op_Load2I    && ld_op==Op_LoadD) &&
           !(n->ideal_Opcode()==Op_Load4C    && ld_op==Op_LoadD) &&
           !(n->ideal_Opcode()==Op_Load4S    && ld_op==Op_LoadD) &&
@@ -1326,17 +1332,17 @@
 
   // --------------------------------------
   // Check for float->int copy; requires a trip through memory
-  if( src_first_rc == rc_float && dst_first_rc == rc_int ) {
+  if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS < 3) {
     int offset = frame::register_save_words*wordSize;
-    if( cbuf ) {
+    if (cbuf) {
       emit3_simm13( *cbuf, Assembler::arith_op, R_SP_enc, Assembler::sub_op3, R_SP_enc, 16 );
       impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st);
       impl_helper(this,cbuf,ra_,do_size,true ,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st);
       emit3_simm13( *cbuf, Assembler::arith_op, R_SP_enc, Assembler::add_op3, R_SP_enc, 16 );
     }
 #ifndef PRODUCT
-    else if( !do_size ) {
-      if( size != 0 ) st->print("\n\t");
+    else if (!do_size) {
+      if (size != 0) st->print("\n\t");
       st->print(  "SUB    R_SP,16,R_SP\n");
       impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st);
       impl_helper(this,cbuf,ra_,do_size,true ,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st);
@@ -1346,6 +1352,21 @@
     size += 16;
   }
 
+  // Check for float->int copy on T4
+  if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS >= 3) {
+    // Further check for aligned-adjacent pair, so we can use a double move
+    if ((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second)
+      return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mdtox_opf,"MOVDTOX",size, st);
+    size  =  impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mstouw_opf,"MOVSTOUW",size, st);
+  }
+  // Check for int->float copy on T4
+  if (src_first_rc == rc_int && dst_first_rc == rc_float && UseVIS >= 3) {
+    // Further check for aligned-adjacent pair, so we can use a double move
+    if ((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second)
+      return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mxtod_opf,"MOVXTOD",size, st);
+    size  =  impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mwtos_opf,"MOVWTOS",size, st);
+  }
+
   // --------------------------------------
   // In the 32-bit 1-reg-longs build ONLY, I see mis-aligned long destinations.
   // In such cases, I have to do the big-endian swap.  For aligned targets, the
@@ -1678,7 +1699,6 @@
 
 void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
   MacroAssembler _masm(&cbuf);
-  Label L;
   Register G5_ic_reg  = reg_to_register_object(Matcher::inline_cache_reg_encode());
   Register temp_reg   = G3;
   assert( G5_ic_reg != temp_reg, "conflicting registers" );
@@ -1820,8 +1840,10 @@
 //
 // NOTE: If the platform does not provide any short branch variants, then
 //       this method should return false for offset 0.
-bool Matcher::is_short_branch_offset(int rule, int offset) {
-  return false;
+bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
+  // The passed offset is relative to the address of the branch.
+  // Don't need to adjust the offset.
+  return UseCBCond && Assembler::is_simm(offset, 12);
 }
 
 const bool Matcher::isSimpleConstant64(jlong value) {
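
For context on the is_simm(offset, 12) test above: CBcond encodes a 10-bit signed word displacement, which corresponds to roughly +/-2 KB of byte offset, and the matcher is handed a byte offset relative to the branch itself. A small self-contained check mirroring that test; is_simm here is a stand-in for Assembler::is_simm:

#include <cassert>

// Stand-in for Assembler::is_simm: value fits in a signed field of nbits bits.
static bool is_simm(long value, int nbits) {
  return -(1L << (nbits - 1)) <= value && value < (1L << (nbits - 1));
}

// Byte offset relative to the branch, as in Matcher::is_short_branch_offset.
static bool fits_cbcond(long byte_offset) {
  return is_simm(byte_offset, 12);   // 10-bit word displacement * 4 bytes
}

int main() {
  assert( fits_cbcond(2044));    // furthest forward word-aligned target
  assert(!fits_cbcond(2048));    // one word too far
  assert( fits_cbcond(-2048));   // furthest backward target
  return 0;
}
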
@@ -2300,60 +2322,23 @@
     __ delayed()->nop();
   %}
 
-  enc_class enc_bp( Label labl, cmpOp cmp, flagsReg cc ) %{
-    MacroAssembler _masm(&cbuf);
-    Label &L = *($labl$$label);
-    Assembler::Predict predict_taken =
-      cbuf.is_backward_branch(L) ? Assembler::pt : Assembler::pn;
-
-    __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, L);
-    __ delayed()->nop();
-  %}
-
-  enc_class enc_bpl( Label labl, cmpOp cmp, flagsRegL cc ) %{
+  enc_class enc_bp( label labl, cmpOp cmp, flagsReg cc ) %{
     MacroAssembler _masm(&cbuf);
-    Label &L = *($labl$$label);
-    Assembler::Predict predict_taken =
-      cbuf.is_backward_branch(L) ? Assembler::pt : Assembler::pn;
-
-    __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, L);
-    __ delayed()->nop();
-  %}
-
-  enc_class enc_bpx( Label labl, cmpOp cmp, flagsRegP cc ) %{
-    MacroAssembler _masm(&cbuf);
-    Label &L = *($labl$$label);
+    Label* L = $labl$$label;
     Assembler::Predict predict_taken =
-      cbuf.is_backward_branch(L) ? Assembler::pt : Assembler::pn;
-
-    __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, L);
-    __ delayed()->nop();
-  %}
-
-  enc_class enc_fbp( Label labl, cmpOpF cmp, flagsRegF cc ) %{
-    MacroAssembler _masm(&cbuf);
-    Label &L = *($labl$$label);
-    Assembler::Predict predict_taken =
-      cbuf.is_backward_branch(L) ? Assembler::pt : Assembler::pn;
-
-    __ fbp( (Assembler::Condition)($cmp$$cmpcode), false, (Assembler::CC)($cc$$reg), predict_taken, L);
+      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
+
+    __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
     __ delayed()->nop();
   %}
 
-  enc_class enc_ba( Label labl ) %{
+  enc_class enc_bpr( label labl, cmpOp_reg cmp, iRegI op1 ) %{
     MacroAssembler _masm(&cbuf);
-    Label &L = *($labl$$label);
-    __ ba(false, L);
-    __ delayed()->nop();
-  %}
-
-  enc_class enc_bpr( Label labl, cmpOp_reg cmp, iRegI op1 ) %{
-    MacroAssembler _masm(&cbuf);
-    Label &L = *$labl$$label;
+    Label* L = $labl$$label;
     Assembler::Predict predict_taken =
-      cbuf.is_backward_branch(L) ? Assembler::pt : Assembler::pn;
-
-    __ bpr( (Assembler::RCondition)($cmp$$cmpcode), false, predict_taken, as_Register($op1$$reg), L);
+      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
+
+    __ bpr( (Assembler::RCondition)($cmp$$cmpcode), false, predict_taken, as_Register($op1$$reg), *L);
     __ delayed()->nop();
   %}
 
@@ -2834,25 +2819,6 @@
     __ float_cmp( $primary, -1, Fsrc1, Fsrc2, Rdst);
   %}
 
-  // Compiler ensures base is doubleword aligned and cnt is count of doublewords
-  enc_class enc_Clear_Array(iRegX cnt, iRegP base, iRegX temp) %{
-    MacroAssembler _masm(&cbuf);
-    Register    nof_bytes_arg   = reg_to_register_object($cnt$$reg);
-    Register    nof_bytes_tmp    = reg_to_register_object($temp$$reg);
-    Register    base_pointer_arg = reg_to_register_object($base$$reg);
-
-    Label loop;
-    __ mov(nof_bytes_arg, nof_bytes_tmp);
-
-    // Loop and clear, walking backwards through the array.
-    // nof_bytes_tmp (if >0) is always the number of bytes to zero
-    __ bind(loop);
-    __ deccc(nof_bytes_tmp, 8);
-    __ br(Assembler::greaterEqual, true, Assembler::pt, loop);
-    __ delayed()-> stx(G0, base_pointer_arg, nof_bytes_tmp);
-    // %%%% this mini-loop must not cross a cache boundary!
-  %}
-
 
   enc_class enc_String_Compare(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result) %{
     Label Ldone, Lloop;
@@ -2971,7 +2937,7 @@
     __ brx(Assembler::equal, true, Assembler::pn, Ldone);
     __ delayed()->add(G0, 1, result_reg);
 
-    __ br_on_reg_cond(Assembler::rc_z, true, Assembler::pn, cnt_reg, Ldone);
+    __ cmp_zero_and_br(Assembler::zero, cnt_reg, Ldone, true, Assembler::pn);
     __ delayed()->add(G0, 1, result_reg); // count == 0
 
     //rename registers
@@ -2991,7 +2957,7 @@
     // Compare char[] arrays aligned to 4 bytes.
     __ char_arrays_equals(str1_reg, str2_reg, limit_reg, result_reg,
                           chr1_reg, chr2_reg, Ldone);
-    __ ba(false,Ldone);
+    __ ba(Ldone);
     __ delayed()->add(G0, 1, result_reg);
 
     // char by char compare
@@ -3050,7 +3016,7 @@
     __ br(Assembler::notEqual, true, Assembler::pn, Ldone);
     __ delayed()->mov(G0, result_reg);     // not equal
 
-    __ br_on_reg_cond(Assembler::rc_z, true, Assembler::pn, tmp1_reg, Ldone);
+    __ cmp_zero_and_br(Assembler::zero, tmp1_reg, Ldone, true, Assembler::pn);
     __ delayed()->add(G0, 1, result_reg); // zero-length arrays are equal
 
     // load array addresses
@@ -3337,10 +3303,10 @@
 
 //----------Instruction Attributes---------------------------------------------
 ins_attrib ins_cost(DEFAULT_COST); // Required cost attribute
-ins_attrib ins_size(32);       // Required size attribute (in bits)
-ins_attrib ins_pc_relative(0); // Required PC Relative flag
-ins_attrib ins_short_branch(0); // Required flag: is this instruction a
-                                // non-matching short branch variant of some
+ins_attrib ins_size(32);           // Required size attribute (in bits)
+ins_attrib ins_avoid_back_to_back(0); // instruction should not be generated back to back
+ins_attrib ins_short_branch(0);    // Required flag: is this instruction a
+                                   // non-matching short branch variant of some
                                                             // long branch?
 
 //----------OPERANDS-----------------------------------------------------------
@@ -3426,6 +3392,15 @@
   interface(CONST_INTER);
 %}
 
+// Integer Immediate: 5-bit
+operand immI5() %{
+  predicate(Assembler::is_simm(n->get_int(), 5));
+  match(ConI);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
 // Integer Immediate: 0-bit
 operand immI0() %{
   predicate(n->get_int() == 0);
@@ -3649,6 +3624,15 @@
   interface(CONST_INTER);
 %}
 
+// Integer Immediate: 5-bit
+operand immL5() %{
+  predicate(n->get_long() == (int)n->get_long() && Assembler::is_simm((int)n->get_long(), 5));
+  match(ConL);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
 // Long Immediate: 13-bit
 operand immL13() %{
   predicate((-4096L < n->get_long()) && (n->get_long() <= 4095L));
@@ -5181,6 +5165,42 @@
     MS  : R;
 %}
 
+// Compare and branch
+pipe_class cmp_br_reg_reg(Universe br, cmpOp cmp, iRegI src1, iRegI src2, label labl, flagsReg cr) %{
+    instruction_count(2); has_delay_slot;
+    cr    : E(write);
+    src1  : R(read);
+    src2  : R(read);
+    IALU  : R;
+    BR    : R;
+%}
+
+// Compare and branch
+pipe_class cmp_br_reg_imm(Universe br, cmpOp cmp, iRegI src1, immI13 src2, label labl, flagsReg cr) %{
+    instruction_count(2); has_delay_slot;
+    cr    : E(write);
+    src1  : R(read);
+    IALU  : R;
+    BR    : R;
+%}
+
+// Compare and branch using cbcond
+pipe_class cbcond_reg_reg(Universe br, cmpOp cmp, iRegI src1, iRegI src2, label labl) %{
+    single_instruction;
+    src1  : E(read);
+    src2  : E(read);
+    IALU  : R;
+    BR    : R;
+%}
+
+// Compare and branch using cbcond
+pipe_class cbcond_reg_imm(Universe br, cmpOp cmp, iRegI src1, immI5 src2, label labl) %{
+    single_instruction;
+    src1  : E(read);
+    IALU  : R;
+    BR    : R;
+%}
+
 pipe_class br_fcc(Universe br, cmpOpF cc, flagsReg cr, label labl) %{
     single_instruction_with_delay_slot;
     cr    : E(read);
@@ -6236,6 +6256,7 @@
 instruct prefetchr( memory mem ) %{
   match( PrefetchRead mem );
   ins_cost(MEMORY_REF_COST);
+  size(4);
 
   format %{ "PREFETCH $mem,0\t! Prefetch read-many" %}
   opcode(Assembler::prefetch_op3);
@@ -6244,9 +6265,9 @@
 %}
 
 instruct prefetchw( memory mem ) %{
-  predicate(AllocatePrefetchStyle != 3 );
   match( PrefetchWrite mem );
   ins_cost(MEMORY_REF_COST);
+  size(4);
 
   format %{ "PREFETCH $mem,2\t! Prefetch write-many (and read)" %}
   opcode(Assembler::prefetch_op3);
@@ -6254,24 +6275,62 @@
   ins_pipe(iload_mem);
 %}
 
-// Use BIS instruction to prefetch.
-instruct prefetchw_bis( memory mem ) %{
-  predicate(AllocatePrefetchStyle == 3);
-  match( PrefetchWrite mem );
-  ins_cost(MEMORY_REF_COST);
-
-  format %{ "STXA   G0,$mem\t! // Block initializing store" %}
-  ins_encode %{
-     Register base = as_Register($mem$$base);
-     int disp = $mem$$disp;
-     if (disp != 0) {
-       __ add(base, AllocatePrefetchStepSize, base);
-     }
-     __ stxa(G0, base, G0, ASI_BLK_INIT_QUAD_LDD_P);
+// Prefetch instructions for allocation.
+
+instruct prefetchAlloc( memory mem ) %{
+  predicate(AllocatePrefetchInstr == 0);
+  match( PrefetchAllocation mem );
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+
+  format %{ "PREFETCH $mem,2\t! Prefetch allocation" %}
+  opcode(Assembler::prefetch_op3);
+  ins_encode( form3_mem_prefetch_write( mem ) );
+  ins_pipe(iload_mem);
+%}
+
+// Use BIS instruction to prefetch for allocation.
+// Could fault, need space at the end of TLAB.
+instruct prefetchAlloc_bis( iRegP dst ) %{
+  predicate(AllocatePrefetchInstr == 1);
+  match( PrefetchAllocation dst );
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+
+  format %{ "STXA   [$dst]\t! // Prefetch allocation using BIS" %}
+  ins_encode %{
+    __ stxa(G0, $dst$$Register, G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
   %}
   ins_pipe(istore_mem_reg);
 %}
 
+// The following code finds the next cache line address to prefetch.
+#ifndef _LP64
+instruct cacheLineAdr( iRegP dst, iRegP src, immI13 mask ) %{
+  match(Set dst (CastX2P (AndI (CastP2X src) mask)));
+  ins_cost(DEFAULT_COST);
+  size(4);
+
+  format %{ "AND    $src,$mask,$dst\t! next cache line address" %}
+  ins_encode %{
+    __ and3($src$$Register, $mask$$constant, $dst$$Register);
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+#else
+instruct cacheLineAdr( iRegP dst, iRegP src, immL13 mask ) %{
+  match(Set dst (CastX2P (AndL (CastP2X src) mask)));
+  ins_cost(DEFAULT_COST);
+  size(4);
+
+  format %{ "AND    $src,$mask,$dst\t! next cache line address" %}
+  ins_encode %{
+    __ and3($src$$Register, $mask$$constant, $dst$$Register);
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+#endif
+
 //----------Store Instructions-------------------------------------------------
 // Store Byte
 instruct storeB(memory mem, iRegI src) %{
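
On the cacheLineAdr / prefetchAlloc_bis pairing above: the AND only rounds an address down to a cache-line boundary (the ideal graph is assumed to have already added the prefetch distance to the allocation top), and the resulting line-start address is what the block-initializing STXA touches. A worked example of the masking, assuming a 64-byte line:

#include <cassert>
#include <cstdint>

// addr & ~(line_size - 1) for a power-of-two line size -- the same effect as
// the "AND $src,$mask,$dst" emitted by cacheLineAdr.
static uintptr_t cache_line_start(uintptr_t addr, uintptr_t line_size) {
  return addr & ~(line_size - 1);
}

int main() {
  assert(cache_line_start(0x1000 + 70, 64) == 0x1040);  // distance added first, then masked
  assert(cache_line_start(0x1000,      64) == 0x1000);  // already line-aligned
  return 0;
}
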
@@ -6629,8 +6688,7 @@
 %}
 
 instruct membar_acquire_lock() %{
-  match(MemBarAcquire);
-  predicate(Matcher::prior_fast_lock(n));
+  match(MemBarAcquireLock);
   ins_cost(0);
 
   size(0);
@@ -6650,8 +6708,7 @@
 %}
 
 instruct membar_release_lock() %{
-  match(MemBarRelease);
-  predicate(Matcher::post_fast_unlock(n));
+  match(MemBarReleaseLock);
   ins_cost(0);
 
   size(0);
@@ -8164,215 +8221,58 @@
   ins_pipe( cadd_cmpltmask );
 %}
 
-//----------Arithmetic Conversion Instructions---------------------------------
-// The conversions operations are all Alpha sorted.  Please keep it that way!
-
-instruct convD2F_reg(regF dst, regD src) %{
-  match(Set dst (ConvD2F src));
-  size(4);
-  format %{ "FDTOS  $src,$dst" %}
-  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdtos_opf);
-  ins_encode(form3_opf_rs2D_rdF(src, dst));
-  ins_pipe(fcvtD2F);
-%}
-
-
-// Convert a double to an int in a float register.
-// If the double is a NAN, stuff a zero in instead.
-instruct convD2I_helper(regF dst, regD src, flagsRegF0 fcc0) %{
-  effect(DEF dst, USE src, KILL fcc0);
-  format %{ "FCMPd  fcc0,$src,$src\t! check for NAN\n\t"
-            "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
-            "FDTOI  $src,$dst\t! convert in delay slot\n\t"
-            "FITOS  $dst,$dst\t! change NaN/max-int to valid float\n\t"
-            "FSUBs  $dst,$dst,$dst\t! cleared only if nan\n"
-      "skip:" %}
-  ins_encode(form_d2i_helper(src,dst));
-  ins_pipe(fcvtD2I);
-%}
-
-instruct convD2I_reg(stackSlotI dst, regD src) %{
-  match(Set dst (ConvD2I src));
-  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
-  expand %{
-    regF tmp;
-    convD2I_helper(tmp, src);
-    regF_to_stkI(dst, tmp);
-  %}
-%}
-
-// Convert a double to a long in a double register.
-// If the double is a NAN, stuff a zero in instead.
-instruct convD2L_helper(regD dst, regD src, flagsRegF0 fcc0) %{
-  effect(DEF dst, USE src, KILL fcc0);
-  format %{ "FCMPd  fcc0,$src,$src\t! check for NAN\n\t"
-            "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
-            "FDTOX  $src,$dst\t! convert in delay slot\n\t"
-            "FXTOD  $dst,$dst\t! change NaN/max-long to valid double\n\t"
-            "FSUBd  $dst,$dst,$dst\t! cleared only if nan\n"
-      "skip:" %}
-  ins_encode(form_d2l_helper(src,dst));
-  ins_pipe(fcvtD2L);
-%}
-
-
-// Double to Long conversion
-instruct convD2L_reg(stackSlotL dst, regD src) %{
-  match(Set dst (ConvD2L src));
-  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
-  expand %{
-    regD tmp;
-    convD2L_helper(tmp, src);
-    regD_to_stkL(dst, tmp);
-  %}
-%}
-
-
-instruct convF2D_reg(regD dst, regF src) %{
-  match(Set dst (ConvF2D src));
-  format %{ "FSTOD  $src,$dst" %}
-  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fstod_opf);
-  ins_encode(form3_opf_rs2F_rdD(src, dst));
-  ins_pipe(fcvtF2D);
-%}
-
-
-instruct convF2I_helper(regF dst, regF src, flagsRegF0 fcc0) %{
-  effect(DEF dst, USE src, KILL fcc0);
-  format %{ "FCMPs  fcc0,$src,$src\t! check for NAN\n\t"
-            "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
-            "FSTOI  $src,$dst\t! convert in delay slot\n\t"
-            "FITOS  $dst,$dst\t! change NaN/max-int to valid float\n\t"
-            "FSUBs  $dst,$dst,$dst\t! cleared only if nan\n"
-      "skip:" %}
-  ins_encode(form_f2i_helper(src,dst));
-  ins_pipe(fcvtF2I);
-%}
-
-instruct convF2I_reg(stackSlotI dst, regF src) %{
-  match(Set dst (ConvF2I src));
-  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
-  expand %{
-    regF tmp;
-    convF2I_helper(tmp, src);
-    regF_to_stkI(dst, tmp);
-  %}
-%}
-
-
-instruct convF2L_helper(regD dst, regF src, flagsRegF0 fcc0) %{
-  effect(DEF dst, USE src, KILL fcc0);
-  format %{ "FCMPs  fcc0,$src,$src\t! check for NAN\n\t"
-            "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
-            "FSTOX  $src,$dst\t! convert in delay slot\n\t"
-            "FXTOD  $dst,$dst\t! change NaN/max-long to valid double\n\t"
-            "FSUBd  $dst,$dst,$dst\t! cleared only if nan\n"
-      "skip:" %}
-  ins_encode(form_f2l_helper(src,dst));
-  ins_pipe(fcvtF2L);
-%}
-
-// Float to Long conversion
-instruct convF2L_reg(stackSlotL dst, regF src) %{
-  match(Set dst (ConvF2L src));
-  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
-  expand %{
-    regD tmp;
-    convF2L_helper(tmp, src);
-    regD_to_stkL(dst, tmp);
-  %}
-%}
-
-
-instruct convI2D_helper(regD dst, regF tmp) %{
-  effect(USE tmp, DEF dst);
-  format %{ "FITOD  $tmp,$dst" %}
-  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf);
-  ins_encode(form3_opf_rs2F_rdD(tmp, dst));
-  ins_pipe(fcvtI2D);
-%}
-
-instruct convI2D_reg(stackSlotI src, regD dst) %{
-  match(Set dst (ConvI2D src));
-  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
-  expand %{
-    regF tmp;
-    stkI_to_regF( tmp, src);
-    convI2D_helper( dst, tmp);
-  %}
-%}
-
-instruct convI2D_mem( regD_low dst, memory mem ) %{
-  match(Set dst (ConvI2D (LoadI mem)));
-  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
-  size(8);
-  format %{ "LDF    $mem,$dst\n\t"
-            "FITOD  $dst,$dst" %}
-  opcode(Assembler::ldf_op3, Assembler::fitod_opf);
-  ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
-  ins_pipe(floadF_mem);
-%}
-
-
-instruct convI2F_helper(regF dst, regF tmp) %{
-  effect(DEF dst, USE tmp);
-  format %{ "FITOS  $tmp,$dst" %}
-  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitos_opf);
-  ins_encode(form3_opf_rs2F_rdF(tmp, dst));
-  ins_pipe(fcvtI2F);
-%}
-
-instruct convI2F_reg( regF dst, stackSlotI src ) %{
-  match(Set dst (ConvI2F src));
-  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
-  expand %{
-    regF tmp;
-    stkI_to_regF(tmp,src);
-    convI2F_helper(dst, tmp);
-  %}
-%}
-
-instruct convI2F_mem( regF dst, memory mem ) %{
-  match(Set dst (ConvI2F (LoadI mem)));
-  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
-  size(8);
-  format %{ "LDF    $mem,$dst\n\t"
-            "FITOS  $dst,$dst" %}
-  opcode(Assembler::ldf_op3, Assembler::fitos_opf);
-  ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
-  ins_pipe(floadF_mem);
-%}
-
-
-instruct convI2L_reg(iRegL dst, iRegI src) %{
-  match(Set dst (ConvI2L src));
-  size(4);
-  format %{ "SRA    $src,0,$dst\t! int->long" %}
-  opcode(Assembler::sra_op3, Assembler::arith_op);
-  ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
+
+//-----------------------------------------------------------------
+// Direct raw moves between float and general registers using VIS3.
+
+//  ins_pipe(faddF_reg);
+instruct MoveF2I_reg_reg(iRegI dst, regF src) %{
+  predicate(UseVIS >= 3);
+  match(Set dst (MoveF2I src));
+
+  format %{ "MOVSTOUW $src,$dst\t! MoveF2I" %}
+  ins_encode %{
+    __ movstouw($src$$FloatRegister, $dst$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct MoveI2F_reg_reg(regF dst, iRegI src) %{
+  predicate(UseVIS >= 3);
+  match(Set dst (MoveI2F src));
+
+  format %{ "MOVWTOS $src,$dst\t! MoveI2F" %}
+  ins_encode %{
+    __ movwtos($src$$Register, $dst$$FloatRegister);
+  %}
   ins_pipe(ialu_reg_reg);
 %}
 
-// Zero-extend convert int to long
-instruct convI2L_reg_zex(iRegL dst, iRegI src, immL_32bits mask ) %{
-  match(Set dst (AndL (ConvI2L src) mask) );
-  size(4);
-  format %{ "SRL    $src,0,$dst\t! zero-extend int to long" %}
-  opcode(Assembler::srl_op3, Assembler::arith_op);
-  ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
+instruct MoveD2L_reg_reg(iRegL dst, regD src) %{
+  predicate(UseVIS >= 3);
+  match(Set dst (MoveD2L src));
+
+  format %{ "MOVDTOX $src,$dst\t! MoveD2L" %}
+  ins_encode %{
+    __ movdtox(as_DoubleFloatRegister($src$$reg), $dst$$Register);
+  %}
   ins_pipe(ialu_reg_reg);
 %}
 
-// Zero-extend long
-instruct zerox_long(iRegL dst, iRegL src, immL_32bits mask ) %{
-  match(Set dst (AndL src mask) );
-  size(4);
-  format %{ "SRL    $src,0,$dst\t! zero-extend long" %}
-  opcode(Assembler::srl_op3, Assembler::arith_op);
-  ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
+instruct MoveL2D_reg_reg(regD dst, iRegL src) %{
+  predicate(UseVIS >= 3);
+  match(Set dst (MoveL2D src));
+
+  format %{ "MOVXTOD $src,$dst\t! MoveL2D" %}
+  ins_encode %{
+    __ movxtod($src$$Register, as_DoubleFloatRegister($dst$$reg));
+  %}
   ins_pipe(ialu_reg_reg);
 %}
 
+
+// Raw moves between float and general registers using stack.
+
 instruct MoveF2I_stack_reg(iRegI dst, stackSlotF src) %{
   match(Set dst (MoveF2I src));
   effect(DEF dst, USE src);
@@ -8427,7 +8327,7 @@
   ins_cost(MEMORY_REF_COST);
 
   size(4);
-  format %{ "STF   $src,$dst\t!MoveF2I" %}
+  format %{ "STF   $src,$dst\t! MoveF2I" %}
   opcode(Assembler::stf_op3);
   ins_encode(simple_form3_mem_reg(dst, src));
   ins_pipe(fstoreF_stk_reg);
@@ -8439,7 +8339,7 @@
   ins_cost(MEMORY_REF_COST);
 
   size(4);
-  format %{ "STW    $src,$dst\t!MoveI2F" %}
+  format %{ "STW    $src,$dst\t! MoveI2F" %}
   opcode(Assembler::stw_op3);
   ins_encode(simple_form3_mem_reg( dst, src ) );
   ins_pipe(istore_mem_reg);
@@ -8451,7 +8351,7 @@
   ins_cost(MEMORY_REF_COST);
 
   size(4);
-  format %{ "STDF   $src,$dst\t!MoveD2L" %}
+  format %{ "STDF   $src,$dst\t! MoveD2L" %}
   opcode(Assembler::stdf_op3);
   ins_encode(simple_form3_mem_reg(dst, src));
   ins_pipe(fstoreD_stk_reg);
@@ -8463,13 +8363,290 @@
   ins_cost(MEMORY_REF_COST);
 
   size(4);
-  format %{ "STX    $src,$dst\t!MoveL2D" %}
+  format %{ "STX    $src,$dst\t! MoveL2D" %}
   opcode(Assembler::stx_op3);
   ins_encode(simple_form3_mem_reg( dst, src ) );
   ins_pipe(istore_mem_reg);
 %}
 
 
+//----------Arithmetic Conversion Instructions---------------------------------
+// The conversion operations are all Alpha sorted.  Please keep it that way!
+
+instruct convD2F_reg(regF dst, regD src) %{
+  match(Set dst (ConvD2F src));
+  size(4);
+  format %{ "FDTOS  $src,$dst" %}
+  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdtos_opf);
+  ins_encode(form3_opf_rs2D_rdF(src, dst));
+  ins_pipe(fcvtD2F);
+%}
+
+
+// Convert a double to an int in a float register.
+// If the double is a NAN, stuff a zero in instead.
+instruct convD2I_helper(regF dst, regD src, flagsRegF0 fcc0) %{
+  effect(DEF dst, USE src, KILL fcc0);
+  format %{ "FCMPd  fcc0,$src,$src\t! check for NAN\n\t"
+            "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
+            "FDTOI  $src,$dst\t! convert in delay slot\n\t"
+            "FITOS  $dst,$dst\t! change NaN/max-int to valid float\n\t"
+            "FSUBs  $dst,$dst,$dst\t! cleared only if nan\n"
+      "skip:" %}
+  ins_encode(form_d2i_helper(src,dst));
+  ins_pipe(fcvtD2I);
+%}
+
+instruct convD2I_stk(stackSlotI dst, regD src) %{
+  match(Set dst (ConvD2I src));
+  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
+  expand %{
+    regF tmp;
+    convD2I_helper(tmp, src);
+    regF_to_stkI(dst, tmp);
+  %}
+%}
+
+instruct convD2I_reg(iRegI dst, regD src) %{
+  predicate(UseVIS >= 3);
+  match(Set dst (ConvD2I src));
+  ins_cost(DEFAULT_COST*2 + BRANCH_COST);
+  expand %{
+    regF tmp;
+    convD2I_helper(tmp, src);
+    MoveF2I_reg_reg(dst, tmp);
+  %}
+%}
+
+
+// Convert a double to a long in a double register.
+// If the double is a NAN, stuff a zero in instead.
+instruct convD2L_helper(regD dst, regD src, flagsRegF0 fcc0) %{
+  effect(DEF dst, USE src, KILL fcc0);
+  format %{ "FCMPd  fcc0,$src,$src\t! check for NAN\n\t"
+            "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
+            "FDTOX  $src,$dst\t! convert in delay slot\n\t"
+            "FXTOD  $dst,$dst\t! change NaN/max-long to valid double\n\t"
+            "FSUBd  $dst,$dst,$dst\t! cleared only if nan\n"
+      "skip:" %}
+  ins_encode(form_d2l_helper(src,dst));
+  ins_pipe(fcvtD2L);
+%}
+
+instruct convD2L_stk(stackSlotL dst, regD src) %{
+  match(Set dst (ConvD2L src));
+  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
+  expand %{
+    regD tmp;
+    convD2L_helper(tmp, src);
+    regD_to_stkL(dst, tmp);
+  %}
+%}
+
+instruct convD2L_reg(iRegL dst, regD src) %{
+  predicate(UseVIS >= 3);
+  match(Set dst (ConvD2L src));
+  ins_cost(DEFAULT_COST*2 + BRANCH_COST);
+  expand %{
+    regD tmp;
+    convD2L_helper(tmp, src);
+    MoveD2L_reg_reg(dst, tmp);
+  %}
+%}
+
+
+instruct convF2D_reg(regD dst, regF src) %{
+  match(Set dst (ConvF2D src));
+  format %{ "FSTOD  $src,$dst" %}
+  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fstod_opf);
+  ins_encode(form3_opf_rs2F_rdD(src, dst));
+  ins_pipe(fcvtF2D);
+%}
+
+
+// Convert a float to an int in a float register.
+// If the float is a NAN, stuff a zero in instead.
+instruct convF2I_helper(regF dst, regF src, flagsRegF0 fcc0) %{
+  effect(DEF dst, USE src, KILL fcc0);
+  format %{ "FCMPs  fcc0,$src,$src\t! check for NAN\n\t"
+            "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
+            "FSTOI  $src,$dst\t! convert in delay slot\n\t"
+            "FITOS  $dst,$dst\t! change NaN/max-int to valid float\n\t"
+            "FSUBs  $dst,$dst,$dst\t! cleared only if nan\n"
+      "skip:" %}
+  ins_encode(form_f2i_helper(src,dst));
+  ins_pipe(fcvtF2I);
+%}
+
+instruct convF2I_stk(stackSlotI dst, regF src) %{
+  match(Set dst (ConvF2I src));
+  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
+  expand %{
+    regF tmp;
+    convF2I_helper(tmp, src);
+    regF_to_stkI(dst, tmp);
+  %}
+%}
+
+instruct convF2I_reg(iRegI dst, regF src) %{
+  predicate(UseVIS >= 3);
+  match(Set dst (ConvF2I src));
+  ins_cost(DEFAULT_COST*2 + BRANCH_COST);
+  expand %{
+    regF tmp;
+    convF2I_helper(tmp, src);
+    MoveF2I_reg_reg(dst, tmp);
+  %}
+%}
+
+
+// Convert a float to a long in a float register.
+// If the float is a NAN, stuff a zero in instead.
+instruct convF2L_helper(regD dst, regF src, flagsRegF0 fcc0) %{
+  effect(DEF dst, USE src, KILL fcc0);
+  format %{ "FCMPs  fcc0,$src,$src\t! check for NAN\n\t"
+            "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
+            "FSTOX  $src,$dst\t! convert in delay slot\n\t"
+            "FXTOD  $dst,$dst\t! change NaN/max-long to valid double\n\t"
+            "FSUBd  $dst,$dst,$dst\t! cleared only if nan\n"
+      "skip:" %}
+  ins_encode(form_f2l_helper(src,dst));
+  ins_pipe(fcvtF2L);
+%}
+
+instruct convF2L_stk(stackSlotL dst, regF src) %{
+  match(Set dst (ConvF2L src));
+  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
+  expand %{
+    regD tmp;
+    convF2L_helper(tmp, src);
+    regD_to_stkL(dst, tmp);
+  %}
+%}
+
+instruct convF2L_reg(iRegL dst, regF src) %{
+  predicate(UseVIS >= 3);
+  match(Set dst (ConvF2L src));
+  ins_cost(DEFAULT_COST*2 + BRANCH_COST);
+  expand %{
+    regD tmp;
+    convF2L_helper(tmp, src);
+    MoveD2L_reg_reg(dst, tmp);
+  %}
+%}
+
+
+instruct convI2D_helper(regD dst, regF tmp) %{
+  effect(USE tmp, DEF dst);
+  format %{ "FITOD  $tmp,$dst" %}
+  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf);
+  ins_encode(form3_opf_rs2F_rdD(tmp, dst));
+  ins_pipe(fcvtI2D);
+%}
+
+instruct convI2D_stk(stackSlotI src, regD dst) %{
+  match(Set dst (ConvI2D src));
+  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
+  expand %{
+    regF tmp;
+    stkI_to_regF(tmp, src);
+    convI2D_helper(dst, tmp);
+  %}
+%}
+
+instruct convI2D_reg(regD_low dst, iRegI src) %{
+  predicate(UseVIS >= 3);
+  match(Set dst (ConvI2D src));
+  expand %{
+    regF tmp;
+    MoveI2F_reg_reg(tmp, src);
+    convI2D_helper(dst, tmp);
+  %}
+%}
+
+instruct convI2D_mem(regD_low dst, memory mem) %{
+  match(Set dst (ConvI2D (LoadI mem)));
+  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
+  size(8);
+  format %{ "LDF    $mem,$dst\n\t"
+            "FITOD  $dst,$dst" %}
+  opcode(Assembler::ldf_op3, Assembler::fitod_opf);
+  ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
+  ins_pipe(floadF_mem);
+%}
+
+
+instruct convI2F_helper(regF dst, regF tmp) %{
+  effect(DEF dst, USE tmp);
+  format %{ "FITOS  $tmp,$dst" %}
+  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitos_opf);
+  ins_encode(form3_opf_rs2F_rdF(tmp, dst));
+  ins_pipe(fcvtI2F);
+%}
+
+instruct convI2F_stk(regF dst, stackSlotI src) %{
+  match(Set dst (ConvI2F src));
+  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
+  expand %{
+    regF tmp;
+    stkI_to_regF(tmp,src);
+    convI2F_helper(dst, tmp);
+  %}
+%}
+
+instruct convI2F_reg(regF dst, iRegI src) %{
+  predicate(UseVIS >= 3);
+  match(Set dst (ConvI2F src));
+  ins_cost(DEFAULT_COST);
+  expand %{
+    regF tmp;
+    MoveI2F_reg_reg(tmp, src);
+    convI2F_helper(dst, tmp);
+  %}
+%}
+
+instruct convI2F_mem( regF dst, memory mem ) %{
+  match(Set dst (ConvI2F (LoadI mem)));
+  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
+  size(8);
+  format %{ "LDF    $mem,$dst\n\t"
+            "FITOS  $dst,$dst" %}
+  opcode(Assembler::ldf_op3, Assembler::fitos_opf);
+  ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
+  ins_pipe(floadF_mem);
+%}
+
+
+instruct convI2L_reg(iRegL dst, iRegI src) %{
+  match(Set dst (ConvI2L src));
+  size(4);
+  format %{ "SRA    $src,0,$dst\t! int->long" %}
+  opcode(Assembler::sra_op3, Assembler::arith_op);
+  ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
+  ins_pipe(ialu_reg_reg);
+%}
+
+// Zero-extend convert int to long
+instruct convI2L_reg_zex(iRegL dst, iRegI src, immL_32bits mask ) %{
+  match(Set dst (AndL (ConvI2L src) mask) );
+  size(4);
+  format %{ "SRL    $src,0,$dst\t! zero-extend int to long" %}
+  opcode(Assembler::srl_op3, Assembler::arith_op);
+  ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
+  ins_pipe(ialu_reg_reg);
+%}
+
+// Zero-extend long
+instruct zerox_long(iRegL dst, iRegL src, immL_32bits mask ) %{
+  match(Set dst (AndL src mask) );
+  size(4);
+  format %{ "SRL    $src,0,$dst\t! zero-extend long" %}
+  opcode(Assembler::srl_op3, Assembler::arith_op);
+  ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
+  ins_pipe(ialu_reg_reg);
+%}
+
+
 //-----------
 // Long to Double conversion using V8 opcodes.
 // Still useful because cheetah traps and becomes
@@ -8589,7 +8766,7 @@
   ins_pipe(fcvtL2D);
 %}
 
-instruct convL2D_reg_fast_fxtof(regD dst, stackSlotL src) %{
+instruct convL2D_stk_fast_fxtof(regD dst, stackSlotL src) %{
   predicate(VM_Version::has_fast_fxtof());
   match(Set dst (ConvL2D src));
   ins_cost(DEFAULT_COST + 3 * MEMORY_REF_COST);
@@ -8600,10 +8777,15 @@
   %}
 %}
 
-//-----------
-// Long to Float conversion using V8 opcodes.
-// Still useful because cheetah traps and becomes
-// amazingly slow for some common numbers.
+instruct convL2D_reg(regD dst, iRegL src) %{
+  predicate(UseVIS >= 3);
+  match(Set dst (ConvL2D src));
+  expand %{
+    regD tmp;
+    MoveL2D_reg_reg(tmp, src);
+    convL2D_helper(dst, tmp);
+  %}
+%}
 
 // Long to Float conversion using fast fxtof
 instruct convL2F_helper(regF dst, regD tmp) %{
@@ -8615,7 +8797,7 @@
   ins_pipe(fcvtL2F);
 %}
 
-instruct convL2F_reg_fast_fxtof(regF dst, stackSlotL src) %{
+instruct convL2F_stk_fast_fxtof(regF dst, stackSlotL src) %{
   match(Set dst (ConvL2F src));
   ins_cost(DEFAULT_COST + MEMORY_REF_COST);
   expand %{
@@ -8624,6 +8806,18 @@
     convL2F_helper(dst, tmp);
   %}
 %}
+
+instruct convL2F_reg(regF dst, iRegL src) %{
+  predicate(UseVIS >= 3);
+  match(Set dst (ConvL2F src));
+  ins_cost(DEFAULT_COST);
+  expand %{
+    regD tmp;
+    MoveL2D_reg_reg(tmp, src);
+    convL2F_helper(dst, tmp);
+  %}
+%}
+
 //-----------
 
 instruct convL2I_reg(iRegI dst, iRegL src) %{
@@ -9068,7 +9262,6 @@
     __ jmp(label_reg, G0);
     __ delayed()->nop();
   %}
-  ins_pc_relative(1);
   ins_pipe(ialu_reg_reg);
 %}
 
@@ -9080,13 +9273,33 @@
   size(8);
   ins_cost(BRANCH_COST);
   format %{ "BA     $labl" %}
-  // Prim = bits 24-22, Secnd = bits 31-30, Tert = cond
-  opcode(Assembler::br_op2, Assembler::branch_op, Assembler::always);
-  ins_encode( enc_ba( labl ) );
-  ins_pc_relative(1);
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ ba(*L);
+    __ delayed()->nop();
+  %}
   ins_pipe(br);
 %}
 
+// Direct Branch, short with no delay slot
+instruct branch_short(label labl) %{
+  match(Goto);
+  predicate(UseCBCond);
+  effect(USE labl);
+
+  size(4);
+  ins_cost(BRANCH_COST);
+  format %{ "BA     $labl\t! short branch" %}
+  ins_encode %{
+    Label* L = $labl$$label;
+    assert(__ use_cbcond(*L), "back to back cbcond");
+    __ ba_short(*L);
+  %}
+  ins_short_branch(1);
+  ins_avoid_back_to_back(1);
+  ins_pipe(cbcond_reg_imm);
+%}
+
 // Conditional Direct Branch
 instruct branchCon(cmpOp cmp, flagsReg icc, label labl) %{
   match(If cmp icc);
@@ -9097,10 +9310,539 @@
   format %{ "BP$cmp   $icc,$labl" %}
   // Prim = bits 24-22, Secnd = bits 31-30
   ins_encode( enc_bp( labl, cmp, icc ) );
-  ins_pc_relative(1);
+  ins_pipe(br_cc);
+%}
+
+instruct branchConU(cmpOpU cmp, flagsRegU icc, label labl) %{
+  match(If cmp icc);
+  effect(USE labl);
+
+  ins_cost(BRANCH_COST);
+  format %{ "BP$cmp  $icc,$labl" %}
+  // Prim = bits 24-22, Secnd = bits 31-30
+  ins_encode( enc_bp( labl, cmp, icc ) );
+  ins_pipe(br_cc);
+%}
+
+instruct branchConP(cmpOpP cmp, flagsRegP pcc, label labl) %{
+  match(If cmp pcc);
+  effect(USE labl);
+
+  size(8);
+  ins_cost(BRANCH_COST);
+  format %{ "BP$cmp  $pcc,$labl" %}
+  ins_encode %{
+    Label* L = $labl$$label;
+    Assembler::Predict predict_taken =
+      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
+
+    __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L);
+    __ delayed()->nop();
+  %}
+  ins_pipe(br_cc);
+%}
+
+instruct branchConF(cmpOpF cmp, flagsRegF fcc, label labl) %{
+  match(If cmp fcc);
+  effect(USE labl);
+
+  size(8);
+  ins_cost(BRANCH_COST);
+  format %{ "FBP$cmp $fcc,$labl" %}
+  ins_encode %{
+    Label* L = $labl$$label;
+    Assembler::Predict predict_taken =
+      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
+
+    __ fbp( (Assembler::Condition)($cmp$$cmpcode), false, (Assembler::CC)($fcc$$reg), predict_taken, *L);
+    __ delayed()->nop();
+  %}
+  ins_pipe(br_fcc);
+%}
+
+instruct branchLoopEnd(cmpOp cmp, flagsReg icc, label labl) %{
+  match(CountedLoopEnd cmp icc);
+  effect(USE labl);
+
+  size(8);
+  ins_cost(BRANCH_COST);
+  format %{ "BP$cmp   $icc,$labl\t! Loop end" %}
+  // Prim = bits 24-22, Secnd = bits 31-30
+  ins_encode( enc_bp( labl, cmp, icc ) );
+  ins_pipe(br_cc);
+%}
+
+instruct branchLoopEndU(cmpOpU cmp, flagsRegU icc, label labl) %{
+  match(CountedLoopEnd cmp icc);
+  effect(USE labl);
+
+  size(8);
+  ins_cost(BRANCH_COST);
+  format %{ "BP$cmp  $icc,$labl\t! Loop end" %}
+  // Prim = bits 24-22, Secnd = bits 31-30
+  ins_encode( enc_bp( labl, cmp, icc ) );
   ins_pipe(br_cc);
 %}
 
+// Compare and branch instructions
+instruct cmpI_reg_branch(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
+  match(If cmp (CmpI op1 op2));
+  effect(USE labl, KILL icc);
+
+  size(12);
+  ins_cost(BRANCH_COST);
+  format %{ "CMP    $op1,$op2\t! int\n\t"
+            "BP$cmp   $labl" %}
+  ins_encode %{
+    Label* L = $labl$$label;
+    Assembler::Predict predict_taken =
+      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
+    __ cmp($op1$$Register, $op2$$Register);
+    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
+    __ delayed()->nop();
+  %}
+  ins_pipe(cmp_br_reg_reg);
+%}
+
+instruct cmpI_imm_branch(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
+  match(If cmp (CmpI op1 op2));
+  effect(USE labl, KILL icc);
+
+  size(12);
+  ins_cost(BRANCH_COST);
+  format %{ "CMP    $op1,$op2\t! int\n\t"
+            "BP$cmp   $labl" %}
+  ins_encode %{
+    Label* L = $labl$$label;
+    Assembler::Predict predict_taken =
+      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
+    __ cmp($op1$$Register, $op2$$constant);
+    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
+    __ delayed()->nop();
+  %}
+  ins_pipe(cmp_br_reg_imm);
+%}
+
+instruct cmpU_reg_branch(cmpOpU cmp, iRegI op1, iRegI op2, label labl, flagsRegU icc) %{
+  match(If cmp (CmpU op1 op2));
+  effect(USE labl, KILL icc);
+
+  size(12);
+  ins_cost(BRANCH_COST);
+  format %{ "CMP    $op1,$op2\t! unsigned\n\t"
+            "BP$cmp  $labl" %}
+  ins_encode %{
+    Label* L = $labl$$label;
+    Assembler::Predict predict_taken =
+      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
+    __ cmp($op1$$Register, $op2$$Register);
+    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
+    __ delayed()->nop();
+  %}
+  ins_pipe(cmp_br_reg_reg);
+%}
+
+instruct cmpU_imm_branch(cmpOpU cmp, iRegI op1, immI5 op2, label labl, flagsRegU icc) %{
+  match(If cmp (CmpU op1 op2));
+  effect(USE labl, KILL icc);
+
+  size(12);
+  ins_cost(BRANCH_COST);
+  format %{ "CMP    $op1,$op2\t! unsigned\n\t"
+            "BP$cmp  $labl" %}
+  ins_encode %{
+    Label* L = $labl$$label;
+    Assembler::Predict predict_taken =
+      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
+    __ cmp($op1$$Register, $op2$$constant);
+    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
+    __ delayed()->nop();
+  %}
+  ins_pipe(cmp_br_reg_imm);
+%}
+
+instruct cmpL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, flagsRegL xcc) %{
+  match(If cmp (CmpL op1 op2));
+  effect(USE labl, KILL xcc);
+
+  size(12);
+  ins_cost(BRANCH_COST);
+  format %{ "CMP    $op1,$op2\t! long\n\t"
+            "BP$cmp   $labl" %}
+  ins_encode %{
+    Label* L = $labl$$label;
+    Assembler::Predict predict_taken =
+      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
+    __ cmp($op1$$Register, $op2$$Register);
+    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L);
+    __ delayed()->nop();
+  %}
+  ins_pipe(cmp_br_reg_reg);
+%}
+
+instruct cmpL_imm_branch(cmpOp cmp, iRegL op1, immL5 op2, label labl, flagsRegL xcc) %{
+  match(If cmp (CmpL op1 op2));
+  effect(USE labl, KILL xcc);
+
+  size(12);
+  ins_cost(BRANCH_COST);
+  format %{ "CMP    $op1,$op2\t! long\n\t"
+            "BP$cmp   $labl" %}
+  ins_encode %{
+    Label* L = $labl$$label;
+    Assembler::Predict predict_taken =
+      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
+    __ cmp($op1$$Register, $op2$$constant);
+    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L);
+    __ delayed()->nop();
+  %}
+  ins_pipe(cmp_br_reg_imm);
+%}
+
+// Compare Pointers and branch
+instruct cmpP_reg_branch(cmpOpP cmp, iRegP op1, iRegP op2, label labl, flagsRegP pcc) %{
+  match(If cmp (CmpP op1 op2));
+  effect(USE labl, KILL pcc);
+
+  size(12);
+  ins_cost(BRANCH_COST);
+  format %{ "CMP    $op1,$op2\t! ptr\n\t"
+            "B$cmp   $labl" %}
+  ins_encode %{
+    Label* L = $labl$$label;
+    Assembler::Predict predict_taken =
+      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
+    __ cmp($op1$$Register, $op2$$Register);
+    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L);
+    __ delayed()->nop();
+  %}
+  ins_pipe(cmp_br_reg_reg);
+%}
+
+instruct cmpP_null_branch(cmpOpP cmp, iRegP op1, immP0 null, label labl, flagsRegP pcc) %{
+  match(If cmp (CmpP op1 null));
+  effect(USE labl, KILL pcc);
+
+  size(12);
+  ins_cost(BRANCH_COST);
+  format %{ "CMP    $op1,0\t! ptr\n\t"
+            "B$cmp   $labl" %}
+  ins_encode %{
+    Label* L = $labl$$label;
+    Assembler::Predict predict_taken =
+      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
+    __ cmp($op1$$Register, G0);
+    // bpr() is not used here since it has a shorter branch distance.
+    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L);
+    __ delayed()->nop();
+  %}
+  ins_pipe(cmp_br_reg_reg);
+%}
+
+instruct cmpN_reg_branch(cmpOp cmp, iRegN op1, iRegN op2, label labl, flagsReg icc) %{
+  match(If cmp (CmpN op1 op2));
+  effect(USE labl, KILL icc);
+
+  size(12);
+  ins_cost(BRANCH_COST);
+  format %{ "CMP    $op1,$op2\t! compressed ptr\n\t"
+            "BP$cmp   $labl" %}
+  ins_encode %{
+    Label* L = $labl$$label;
+    Assembler::Predict predict_taken =
+      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
+    __ cmp($op1$$Register, $op2$$Register);
+    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
+    __ delayed()->nop();
+  %}
+  ins_pipe(cmp_br_reg_reg);
+%}
+
+instruct cmpN_null_branch(cmpOp cmp, iRegN op1, immN0 null, label labl, flagsReg icc) %{
+  match(If cmp (CmpN op1 null));
+  effect(USE labl, KILL icc);
+
+  size(12);
+  ins_cost(BRANCH_COST);
+  format %{ "CMP    $op1,0\t! compressed ptr\n\t"
+            "BP$cmp   $labl" %}
+  ins_encode %{
+    Label* L = $labl$$label;
+    Assembler::Predict predict_taken =
+      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
+    __ cmp($op1$$Register, G0);
+    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
+    __ delayed()->nop();
+  %}
+  ins_pipe(cmp_br_reg_reg);
+%}
+
+// Loop back branch
+instruct cmpI_reg_branchLoopEnd(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
+  match(CountedLoopEnd cmp (CmpI op1 op2));
+  effect(USE labl, KILL icc);
+
+  size(12);
+  ins_cost(BRANCH_COST);
+  format %{ "CMP    $op1,$op2\t! int\n\t"
+            "BP$cmp   $labl\t! Loop end" %}
+  ins_encode %{
+    Label* L = $labl$$label;
+    Assembler::Predict predict_taken =
+      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
+    __ cmp($op1$$Register, $op2$$Register);
+    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
+    __ delayed()->nop();
+  %}
+  ins_pipe(cmp_br_reg_reg);
+%}
+
+instruct cmpI_imm_branchLoopEnd(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
+  match(CountedLoopEnd cmp (CmpI op1 op2));
+  effect(USE labl, KILL icc);
+
+  size(12);
+  ins_cost(BRANCH_COST);
+  format %{ "CMP    $op1,$op2\t! int\n\t"
+            "BP$cmp   $labl\t! Loop end" %}
+  ins_encode %{
+    Label* L = $labl$$label;
+    Assembler::Predict predict_taken =
+      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
+    __ cmp($op1$$Register, $op2$$constant);
+    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
+    __ delayed()->nop();
+  %}
+  ins_pipe(cmp_br_reg_imm);
+%}
+
+// Short compare and branch instructions
+instruct cmpI_reg_branch_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
+  match(If cmp (CmpI op1 op2));
+  predicate(UseCBCond);
+  effect(USE labl, KILL icc);
+
+  size(4);
+  ins_cost(BRANCH_COST);
+  format %{ "CWB$cmp  $op1,$op2,$labl\t! int" %}
+  ins_encode %{
+    Label* L = $labl$$label;
+    assert(__ use_cbcond(*L), "back to back cbcond");
+    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
+  %}
+  ins_short_branch(1);
+  ins_avoid_back_to_back(1);
+  ins_pipe(cbcond_reg_reg);
+%}
+
+instruct cmpI_imm_branch_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
+  match(If cmp (CmpI op1 op2));
+  predicate(UseCBCond);
+  effect(USE labl, KILL icc);
+
+  size(4);
+  ins_cost(BRANCH_COST);
+  format %{ "CWB$cmp  $op1,$op2,$labl\t! int" %}
+  ins_encode %{
+    Label* L = $labl$$label;
+    assert(__ use_cbcond(*L), "back to back cbcond");
+    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
+  %}
+  ins_short_branch(1);
+  ins_avoid_back_to_back(1);
+  ins_pipe(cbcond_reg_imm);
+%}
+
+instruct cmpU_reg_branch_short(cmpOpU cmp, iRegI op1, iRegI op2, label labl, flagsRegU icc) %{
+  match(If cmp (CmpU op1 op2));
+  predicate(UseCBCond);
+  effect(USE labl, KILL icc);
+
+  size(4);
+  ins_cost(BRANCH_COST);
+  format %{ "CWB$cmp $op1,$op2,$labl\t! unsigned" %}
+  ins_encode %{
+    Label* L = $labl$$label;
+    assert(__ use_cbcond(*L), "back to back cbcond");
+    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
+  %}
+  ins_short_branch(1);
+  ins_avoid_back_to_back(1);
+  ins_pipe(cbcond_reg_reg);
+%}
+
+instruct cmpU_imm_branch_short(cmpOpU cmp, iRegI op1, immI5 op2, label labl, flagsRegU icc) %{
+  match(If cmp (CmpU op1 op2));
+  predicate(UseCBCond);
+  effect(USE labl, KILL icc);
+
+  size(4);
+  ins_cost(BRANCH_COST);
+  format %{ "CWB$cmp $op1,$op2,$labl\t! unsigned" %}
+  ins_encode %{
+    Label* L = $labl$$label;
+    assert(__ use_cbcond(*L), "back to back cbcond");
+    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
+  %}
+  ins_short_branch(1);
+  ins_avoid_back_to_back(1);
+  ins_pipe(cbcond_reg_imm);
+%}
+
+instruct cmpL_reg_branch_short(cmpOp cmp, iRegL op1, iRegL op2, label labl, flagsRegL xcc) %{
+  match(If cmp (CmpL op1 op2));
+  predicate(UseCBCond);
+  effect(USE labl, KILL xcc);
+
+  size(4);
+  ins_cost(BRANCH_COST);
+  format %{ "CXB$cmp  $op1,$op2,$labl\t! long" %}
+  ins_encode %{
+    Label* L = $labl$$label;
+    assert(__ use_cbcond(*L), "back to back cbcond");
+    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$Register, *L);
+  %}
+  ins_short_branch(1);
+  ins_avoid_back_to_back(1);
+  ins_pipe(cbcond_reg_reg);
+%}
+
+instruct cmpL_imm_branch_short(cmpOp cmp, iRegL op1, immL5 op2, label labl, flagsRegL xcc) %{
+  match(If cmp (CmpL op1 op2));
+  predicate(UseCBCond);
+  effect(USE labl, KILL xcc);
+
+  size(4);
+  ins_cost(BRANCH_COST);
+  format %{ "CXB$cmp  $op1,$op2,$labl\t! long" %}
+  ins_encode %{
+    Label* L = $labl$$label;
+    assert(__ use_cbcond(*L), "back to back cbcond");
+    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$constant, *L);
+  %}
+  ins_short_branch(1);
+  ins_avoid_back_to_back(1);
+  ins_pipe(cbcond_reg_imm);
+%}
+
+// Compare Pointers and branch
+instruct cmpP_reg_branch_short(cmpOpP cmp, iRegP op1, iRegP op2, label labl, flagsRegP pcc) %{
+  match(If cmp (CmpP op1 op2));
+  predicate(UseCBCond);
+  effect(USE labl, KILL pcc);
+
+  size(4);
+  ins_cost(BRANCH_COST);
+#ifdef _LP64
+  format %{ "CXB$cmp $op1,$op2,$labl\t! ptr" %}
+#else
+  format %{ "CWB$cmp $op1,$op2,$labl\t! ptr" %}
+#endif
+  ins_encode %{
+    Label* L = $labl$$label;
+    assert(__ use_cbcond(*L), "back to back cbcond");
+    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, $op2$$Register, *L);
+  %}
+  ins_short_branch(1);
+  ins_avoid_back_to_back(1);
+  ins_pipe(cbcond_reg_reg);
+%}
+
+instruct cmpP_null_branch_short(cmpOpP cmp, iRegP op1, immP0 null, label labl, flagsRegP pcc) %{
+  match(If cmp (CmpP op1 null));
+  predicate(UseCBCond);
+  effect(USE labl, KILL pcc);
+
+  size(4);
+  ins_cost(BRANCH_COST);
+#ifdef _LP64
+  format %{ "CXB$cmp $op1,0,$labl\t! ptr" %}
+#else
+  format %{ "CWB$cmp $op1,0,$labl\t! ptr" %}
+#endif
+  ins_encode %{
+    Label* L = $labl$$label;
+    assert(__ use_cbcond(*L), "back to back cbcond");
+    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, G0, *L);
+  %}
+  ins_short_branch(1);
+  ins_avoid_back_to_back(1);
+  ins_pipe(cbcond_reg_reg);
+%}
+
+instruct cmpN_reg_branch_short(cmpOp cmp, iRegN op1, iRegN op2, label labl, flagsReg icc) %{
+  match(If cmp (CmpN op1 op2));
+  predicate(UseCBCond);
+  effect(USE labl, KILL icc);
+
+  size(4);
+  ins_cost(BRANCH_COST);
+  format %{ "CWB$cmp  $op1,op2,$labl\t! compressed ptr" %}
+  ins_encode %{
+    Label* L = $labl$$label;
+    assert(__ use_cbcond(*L), "back to back cbcond");
+    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
+  %}
+  ins_short_branch(1);
+  ins_avoid_back_to_back(1);
+  ins_pipe(cbcond_reg_reg);
+%}
+
+instruct cmpN_null_branch_short(cmpOp cmp, iRegN op1, immN0 null, label labl, flagsReg icc) %{
+  match(If cmp (CmpN op1 null));
+  predicate(UseCBCond);
+  effect(USE labl, KILL icc);
+
+  size(4);
+  ins_cost(BRANCH_COST);
+  format %{ "CWB$cmp  $op1,0,$labl\t! compressed ptr" %}
+  ins_encode %{
+    Label* L = $labl$$label;
+    assert(__ use_cbcond(*L), "back to back cbcond");
+    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, G0, *L);
+  %}
+  ins_short_branch(1);
+  ins_avoid_back_to_back(1);
+  ins_pipe(cbcond_reg_reg);
+%}
+
+// Loop back branch
+instruct cmpI_reg_branchLoopEnd_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
+  match(CountedLoopEnd cmp (CmpI op1 op2));
+  predicate(UseCBCond);
+  effect(USE labl, KILL icc);
+
+  size(4);
+  ins_cost(BRANCH_COST);
+  format %{ "CWB$cmp  $op1,$op2,$labl\t! Loop end" %}
+  ins_encode %{
+    Label* L = $labl$$label;
+    assert(__ use_cbcond(*L), "back to back cbcond");
+    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
+  %}
+  ins_short_branch(1);
+  ins_avoid_back_to_back(1);
+  ins_pipe(cbcond_reg_reg);
+%}
+
+instruct cmpI_imm_branchLoopEnd_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
+  match(CountedLoopEnd cmp (CmpI op1 op2));
+  predicate(UseCBCond);
+  effect(USE labl, KILL icc);
+
+  size(4);
+  ins_cost(BRANCH_COST);
+  format %{ "CWB$cmp  $op1,$op2,$labl\t! Loop end" %}
+  ins_encode %{
+    Label* L = $labl$$label;
+    assert(__ use_cbcond(*L), "back to back cbcond");
+    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
+  %}
+  ins_short_branch(1);
+  ins_avoid_back_to_back(1);
+  ins_pipe(cbcond_reg_imm);
+%}
+
 // Branch-on-register tests all 64 bits.  We assume that values
 // in 64-bit registers always remains zero or sign extended
 // unless our code munges the high bits.  Interrupts can chop
@@ -9114,7 +9856,6 @@
   ins_cost(BRANCH_COST);
   format %{ "BR$cmp   $op1,$labl" %}
   ins_encode( enc_bpr( labl, cmp, op1 ) );
-  ins_pc_relative(1);
   ins_pipe(br_reg);
 %}
 
@@ -9127,7 +9868,6 @@
   ins_cost(BRANCH_COST);
   format %{ "BR$cmp   $op1,$labl" %}
   ins_encode( enc_bpr( labl, cmp, op1 ) );
-  ins_pc_relative(1);
   ins_pipe(br_reg);
 %}
 
@@ -9140,72 +9880,9 @@
   ins_cost(BRANCH_COST);
   format %{ "BR$cmp   $op1,$labl" %}
   ins_encode( enc_bpr( labl, cmp, op1 ) );
-  ins_pc_relative(1);
   ins_pipe(br_reg);
 %}
 
-instruct branchConU(cmpOpU cmp, flagsRegU icc, label labl) %{
-  match(If cmp icc);
-  effect(USE labl);
-
-  format %{ "BP$cmp  $icc,$labl" %}
-  // Prim = bits 24-22, Secnd = bits 31-30
-  ins_encode( enc_bp( labl, cmp, icc ) );
-  ins_pc_relative(1);
-  ins_pipe(br_cc);
-%}
-
-instruct branchConP(cmpOpP cmp, flagsRegP pcc, label labl) %{
-  match(If cmp pcc);
-  effect(USE labl);
-
-  size(8);
-  ins_cost(BRANCH_COST);
-  format %{ "BP$cmp  $pcc,$labl" %}
-  // Prim = bits 24-22, Secnd = bits 31-30
-  ins_encode( enc_bpx( labl, cmp, pcc ) );
-  ins_pc_relative(1);
-  ins_pipe(br_cc);
-%}
-
-instruct branchConF(cmpOpF cmp, flagsRegF fcc, label labl) %{
-  match(If cmp fcc);
-  effect(USE labl);
-
-  size(8);
-  ins_cost(BRANCH_COST);
-  format %{ "FBP$cmp $fcc,$labl" %}
-  // Prim = bits 24-22, Secnd = bits 31-30
-  ins_encode( enc_fbp( labl, cmp, fcc ) );
-  ins_pc_relative(1);
-  ins_pipe(br_fcc);
-%}
-
-instruct branchLoopEnd(cmpOp cmp, flagsReg icc, label labl) %{
-  match(CountedLoopEnd cmp icc);
-  effect(USE labl);
-
-  size(8);
-  ins_cost(BRANCH_COST);
-  format %{ "BP$cmp   $icc,$labl\t! Loop end" %}
-  // Prim = bits 24-22, Secnd = bits 31-30
-  ins_encode( enc_bp( labl, cmp, icc ) );
-  ins_pc_relative(1);
-  ins_pipe(br_cc);
-%}
-
-instruct branchLoopEndU(cmpOpU cmp, flagsRegU icc, label labl) %{
-  match(CountedLoopEnd cmp icc);
-  effect(USE labl);
-
-  size(8);
-  ins_cost(BRANCH_COST);
-  format %{ "BP$cmp  $icc,$labl\t! Loop end" %}
-  // Prim = bits 24-22, Secnd = bits 31-30
-  ins_encode( enc_bp( labl, cmp, icc ) );
-  ins_pc_relative(1);
-  ins_pipe(br_cc);
-%}
 
 // ============================================================================
 // Long Compare
@@ -9235,9 +9912,14 @@
   size(8);
   ins_cost(BRANCH_COST);
   format %{ "BP$cmp   $xcc,$labl" %}
-  // Prim = bits 24-22, Secnd = bits 31-30
-  ins_encode( enc_bpl( labl, cmp, xcc ) );
-  ins_pc_relative(1);
+  ins_encode %{
+    Label* L = $labl$$label;
+    Assembler::Predict predict_taken =
+      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
+
+    __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L);
+    __ delayed()->nop();
+  %}
   ins_pipe(br_cc);
 %}
 
@@ -9365,7 +10047,6 @@
   ins_cost(CALL_COST);
   format %{ "CALL,static  ; NOP ==> " %}
   ins_encode( Java_Static_Call( meth ), call_epilog );
-  ins_pc_relative(1);
   ins_pipe(simple_call);
 %}
 
@@ -9375,11 +10056,10 @@
   predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
   effect(USE meth, KILL l7_mh_SP_save);
 
-  size(8);
+  size(16);
   ins_cost(CALL_COST);
   format %{ "CALL,static/MethodHandle" %}
   ins_encode(preserve_SP, Java_Static_Call(meth), restore_SP, call_epilog);
-  ins_pc_relative(1);
   ins_pipe(simple_call);
 %}
 
@@ -9392,7 +10072,6 @@
   format %{ "SET    (empty),R_G5\n\t"
             "CALL,dynamic  ; NOP ==> " %}
   ins_encode( Java_Dynamic_Call( meth ), call_epilog );
-  ins_pc_relative(1);
   ins_pipe(call);
 %}
 
@@ -9404,7 +10083,6 @@
   format %{ "CALL,runtime" %}
   ins_encode( Java_To_Runtime( meth ),
               call_epilog, adjust_long_from_native_call );
-  ins_pc_relative(1);
   ins_pipe(simple_call);
 %}
 
@@ -9417,7 +10095,6 @@
   ins_encode( Java_To_Runtime( meth ),
               call_epilog,
               adjust_long_from_native_call );
-  ins_pc_relative(1);
   ins_pipe(simple_call);
 %}
 
@@ -9430,7 +10107,6 @@
   ins_encode( Java_To_Runtime( meth ),
               call_epilog,
               adjust_long_from_native_call );
-  ins_pc_relative(1);
   ins_pipe(simple_call);
 %}
 
@@ -9555,7 +10231,6 @@
   effect(KILL scratch, TEMP scratch2);
   ins_cost(100);
 
-  size(4*112);       // conservative overestimation ...
   format %{ "FASTLOCK  $object, $box; KILL $scratch, $scratch2, $box" %}
   ins_encode( Fast_Lock(object, box, scratch, scratch2) );
   ins_pipe(long_memory_op);
@@ -9567,15 +10242,14 @@
   effect(KILL scratch, TEMP scratch2);
   ins_cost(100);
 
-  size(4*120);       // conservative overestimation ...
   format %{ "FASTUNLOCK  $object, $box; KILL $scratch, $scratch2, $box" %}
   ins_encode( Fast_Unlock(object, box, scratch, scratch2) );
   ins_pipe(long_memory_op);
 %}
 
-// Count and Base registers are fixed because the allocator cannot
-// kill unknown registers.  The encodings are generic.
+// The encodings are generic.
 instruct clear_array(iRegX cnt, iRegP base, iRegX temp, Universe dummy, flagsReg ccr) %{
+  predicate(!use_block_zeroing(n->in(2)) );
   match(Set dummy (ClearArray cnt base));
   effect(TEMP temp, KILL ccr);
   ins_cost(300);
@@ -9583,7 +10257,71 @@
     "loop:   SUBcc  $temp,8,$temp\t! Count down a dword of bytes\n"
     "        BRge   loop\t\t! Clearing loop\n"
     "        STX    G0,[$base+$temp]\t! delay slot" %}
-  ins_encode( enc_Clear_Array(cnt, base, temp) );
+
+  ins_encode %{
+    // Compiler ensures base is doubleword aligned and cnt is count of doublewords
+    Register nof_bytes_arg    = $cnt$$Register;
+    Register nof_bytes_tmp    = $temp$$Register;
+    Register base_pointer_arg = $base$$Register;
+
+    Label loop;
+    __ mov(nof_bytes_arg, nof_bytes_tmp);
+
+    // Loop and clear, walking backwards through the array.
+    // nof_bytes_tmp (if >0) is always the number of bytes to zero
+    __ bind(loop);
+    __ deccc(nof_bytes_tmp, 8);
+    __ br(Assembler::greaterEqual, true, Assembler::pt, loop);
+    __ delayed()-> stx(G0, base_pointer_arg, nof_bytes_tmp);
+    // %%%% this mini-loop must not cross a cache boundary!
+  %}
+  ins_pipe(long_memory_op);
+%}
+
+instruct clear_array_bis(g1RegX cnt, o0RegP base, Universe dummy, flagsReg ccr) %{
+  predicate(use_block_zeroing(n->in(2)));
+  match(Set dummy (ClearArray cnt base));
+  effect(USE_KILL cnt, USE_KILL base, KILL ccr);
+  ins_cost(300);
+  format %{ "CLEAR  [$base, $cnt]\t! ClearArray" %}
+
+  ins_encode %{
+
+    assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
+    Register to    = $base$$Register;
+    Register count = $cnt$$Register;
+
+    Label Ldone;
+    __ nop(); // Separate short branches
+    // Use BIS for zeroing (temp is not used).
+    __ bis_zeroing(to, count, G0, Ldone);
+    __ bind(Ldone);
+
+  %}
+  ins_pipe(long_memory_op);
+%}
+
+instruct clear_array_bis_2(g1RegX cnt, o0RegP base, iRegX tmp, Universe dummy, flagsReg ccr) %{
+  predicate(use_block_zeroing(n->in(2)) && !Assembler::is_simm13((int)BlockZeroingLowLimit));
+  match(Set dummy (ClearArray cnt base));
+  effect(TEMP tmp, USE_KILL cnt, USE_KILL base, KILL ccr);
+  ins_cost(300);
+  format %{ "CLEAR  [$base, $cnt]\t! ClearArray" %}
+
+  ins_encode %{
+
+    assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
+    Register to    = $base$$Register;
+    Register count = $cnt$$Register;
+    Register temp  = $tmp$$Register;
+
+    Label Ldone;
+    __ nop(); // Separate short branches
+    // Use BIS for zeroing
+    __ bis_zeroing(to, count, temp, Ldone);
+    __ bind(Ldone);
+
+  %}
   ins_pipe(long_memory_op);
 %}
 
@@ -9738,7 +10476,7 @@
   ins_pipe(ialu_reg);
 %}
 
-instruct countTrailingZerosL(iRegI dst, iRegL src, flagsReg cr) %{
+instruct countTrailingZerosL(iRegIsafe dst, iRegL src, flagsReg cr) %{
   predicate(UsePopCountInstruction);  // See Matcher::match_rule_supported
   match(Set dst (CountTrailingZerosL src));
   effect(TEMP dst, KILL cr);
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -150,8 +150,7 @@
     { const Register t = G3_scratch;
       Label L;
       __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), t);
-      __ br_null(t, false, Assembler::pt, L);
-      __ delayed()->nop();
+      __ br_null_short(t, Assembler::pt, L);
       __ stop("StubRoutines::call_stub: entered with pending exception");
       __ bind(L);
     }
@@ -207,8 +206,7 @@
       Label exit;
       __ ld_ptr(parameter_size.as_in().as_address(), cnt);      // parameter counter
       __ add( FP, STACK_BIAS, dst );
-      __ tst(cnt);
-      __ br(Assembler::zero, false, Assembler::pn, exit);
+      __ cmp_zero_and_br(Assembler::zero, cnt, exit);
       __ delayed()->sub(dst, BytesPerWord, dst);                 // setup Lentry_args
 
       // copy parameters if any
@@ -282,20 +280,20 @@
       __ delayed()->restore();
 
       __ BIND(is_object);
-      __ ba(false, exit);
+      __ ba(exit);
       __ delayed()->st_ptr(O0, addr, G0);
 
       __ BIND(is_float);
-      __ ba(false, exit);
+      __ ba(exit);
       __ delayed()->stf(FloatRegisterImpl::S, F0, addr, G0);
 
       __ BIND(is_double);
-      __ ba(false, exit);
+      __ ba(exit);
       __ delayed()->stf(FloatRegisterImpl::D, F0, addr, G0);
 
       __ BIND(is_long);
 #ifdef _LP64
-      __ ba(false, exit);
+      __ ba(exit);
       __ delayed()->st_long(O0, addr, G0);      // store entire long
 #else
 #if defined(COMPILER2)
@@ -307,11 +305,11 @@
   // do this here. Unfortunately if we did a rethrow we'd see an machepilog node
   // first which would move g1 -> O0/O1 and destroy the exception we were throwing.
 
-      __ ba(false, exit);
+      __ ba(exit);
       __ delayed()->stx(G1, addr, G0);  // store entire long
 #else
       __ st(O1, addr, BytesPerInt);
-      __ ba(false, exit);
+      __ ba(exit);
       __ delayed()->st(O0, addr, G0);
 #endif /* COMPILER2 */
 #endif /* _LP64 */
@@ -382,8 +380,7 @@
     // make sure that this code is only executed if there is a pending exception
     { Label L;
       __ ld_ptr(exception_addr, Gtemp);
-      __ br_notnull(Gtemp, false, Assembler::pt, L);
-      __ delayed()->nop();
+      __ br_notnull_short(Gtemp, Assembler::pt, L);
       __ stop("StubRoutines::forward exception: no pending exception (1)");
       __ bind(L);
     }
@@ -406,8 +403,7 @@
 #ifdef ASSERT
     // make sure exception is set
     { Label L;
-      __ br_notnull(Oexception, false, Assembler::pt, L);
-      __ delayed()->nop();
+      __ br_notnull_short(Oexception, Assembler::pt, L);
       __ stop("StubRoutines::forward exception: no pending exception (2)");
       __ bind(L);
     }
@@ -440,7 +436,7 @@
 #undef __
 #define __ masm->
 
-  address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc,
+  address generate_throw_exception(const char* name, address runtime_entry,
                                    Register arg1 = noreg, Register arg2 = noreg) {
 #ifdef ASSERT
     int insts_size = VerifyThread ? 1 * K : 600;
@@ -466,11 +462,6 @@
 
     int frame_complete = __ offset();
 
-    if (restore_saved_exception_pc) {
-      __ ld_ptr(G2_thread, JavaThread::saved_exception_pc_offset(), I7);
-      __ sub(I7, frame::pc_return_offset, I7);
-    }
-
     // Note that we always have a runtime stub frame on the top of stack by this point
     Register last_java_sp = SP;
     // 64-bit last_java_sp is biased!
@@ -501,8 +492,7 @@
     Address exception_addr(G2_thread, Thread::pending_exception_offset());
     Register scratch_reg = Gtemp;
     __ ld_ptr(exception_addr, scratch_reg);
-    __ br_notnull(scratch_reg, false, Assembler::pt, L);
-    __ delayed()->nop();
+    __ br_notnull_short(scratch_reg, Assembler::pt, L);
     __ should_not_reach_here();
     __ bind(L);
 #endif // ASSERT
@@ -614,9 +604,7 @@
     __ mov(G0,yield_reg);
 
     __ BIND(retry);
-    __ cmp(yield_reg, V8AtomicOperationUnderLockSpinCount);
-    __ br(Assembler::less, false, Assembler::pt, dontyield);
-    __ delayed()->nop();
+    __ cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dontyield);
 
     // This code can only be called from inside the VM, this
     // stub is only invoked from Atomic::add().  We do not
@@ -676,9 +664,7 @@
       // try to replace O2 with O3
       __ cas_under_lock(O1, O2, O3,
       (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
-      __ cmp(O2, O3);
-      __ br(Assembler::notEqual, false, Assembler::pn, retry);
-      __ delayed()->nop();
+      __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
 
       __ retl(false);
       __ delayed()->mov(O2, O0);  // report previous value to caller
@@ -798,11 +784,9 @@
       __ BIND(retry);
 
       __ lduw(O1, 0, O2);
-      __ add(O0,   O2, O3);
-      __ cas(O1,   O2, O3);
-      __ cmp(      O2, O3);
-      __ br(Assembler::notEqual, false, Assembler::pn, retry);
-      __ delayed()->nop();
+      __ add(O0, O2, O3);
+      __ cas(O1, O2, O3);
+      __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
       __ retl(false);
       __ delayed()->add(O0, O2, O0); // note that cas made O2==O3
     } else {
@@ -1135,6 +1119,126 @@
     }
   }
 
+  //
+  // Generate main code for disjoint arraycopy
+  //
+  typedef void (StubGenerator::*CopyLoopFunc)(Register from, Register to, Register count, int count_dec,
+                                              Label& L_loop, bool use_prefetch, bool use_bis);
+
+  void disjoint_copy_core(Register from, Register to, Register count, int log2_elem_size,
+                          int iter_size, CopyLoopFunc copy_loop_func) {
+    Label L_copy;
+
+    assert(log2_elem_size <= 3, "the following code should be changed");
+    int count_dec = 16>>log2_elem_size;
+
+    int prefetch_dist = MAX2(ArraycopySrcPrefetchDistance, ArraycopyDstPrefetchDistance);
+    assert(prefetch_dist < 4096, "invalid value");
+    prefetch_dist = (prefetch_dist + (iter_size-1)) & (-iter_size); // round up to one iteration copy size
+    int prefetch_count = (prefetch_dist >> log2_elem_size); // elements count
+
+    if (UseBlockCopy) {
+      Label L_block_copy, L_block_copy_prefetch, L_skip_block_copy;
+
+      // 64 bytes tail + bytes copied in one loop iteration
+      int tail_size = 64 + iter_size;
+      int block_copy_count = (MAX2(tail_size, (int)BlockCopyLowLimit)) >> log2_elem_size;
+      // Use BIS copy only for big arrays since it requires membar.
+      __ set(block_copy_count, O4);
+      __ cmp_and_br_short(count, O4, Assembler::lessUnsigned, Assembler::pt, L_skip_block_copy);
+      // This code is for disjoint source and destination:
+      //   to <= from || to >= from+count
+      // but BIS will stomp over 'from' if (to > from-tail_size && to <= from)
+      __ sub(from, to, O4);
+      __ srax(O4, 4, O4); // divide by 16 since the following short branch has only 5 bits for imm.
+      __ cmp_and_br_short(O4, (tail_size>>4), Assembler::lessEqualUnsigned, Assembler::pn, L_skip_block_copy);
+
+      __ wrasi(G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
+      // BIS should not be used to copy the tail (64 bytes + iter_size)
+      // to avoid zeroing the following values.
+      __ sub(count, (tail_size>>log2_elem_size), count); // count is still positive >= 0
+
+      if (prefetch_count > 0) { // rounded up to one iteration count
+        // Do prefetching only if copy size is bigger
+        // than prefetch distance.
+        __ set(prefetch_count, O4);
+        __ cmp_and_brx_short(count, O4, Assembler::less, Assembler::pt, L_block_copy);
+        __ sub(count, prefetch_count, count);
+
+        (this->*copy_loop_func)(from, to, count, count_dec, L_block_copy_prefetch, true, true);
+        __ add(count, prefetch_count, count); // restore count
+
+      } // prefetch_count > 0
+
+      (this->*copy_loop_func)(from, to, count, count_dec, L_block_copy, false, true);
+      __ add(count, (tail_size>>log2_elem_size), count); // restore count
+
+      __ wrasi(G0, Assembler::ASI_PRIMARY_NOFAULT);
+      // BIS needs membar.
+      __ membar(Assembler::StoreLoad);
+      // Copy tail
+      __ ba_short(L_copy);
+
+      __ BIND(L_skip_block_copy);
+    } // UseBlockCopy
+
+    if (prefetch_count > 0) { // rounded up to one iteration count
+      // Do prefetching only if copy size is bigger
+      // than prefetch distance.
+      __ set(prefetch_count, O4);
+      __ cmp_and_brx_short(count, O4, Assembler::lessUnsigned, Assembler::pt, L_copy);
+      __ sub(count, prefetch_count, count);
+
+      Label L_copy_prefetch;
+      (this->*copy_loop_func)(from, to, count, count_dec, L_copy_prefetch, true, false);
+      __ add(count, prefetch_count, count); // restore count
+
+    } // prefetch_count > 0
+
+    (this->*copy_loop_func)(from, to, count, count_dec, L_copy, false, false);
+  }
+
+
+
+  //
+  // Helper methods for copy_16_bytes_forward_with_shift()
+  //
+  void copy_16_bytes_shift_loop(Register from, Register to, Register count, int count_dec,
+                                Label& L_loop, bool use_prefetch, bool use_bis) {
+
+    const Register left_shift  = G1; // left  shift bit counter
+    const Register right_shift = G5; // right shift bit counter
+
+    __ align(OptoLoopAlignment);
+    __ BIND(L_loop);
+    if (use_prefetch) {
+      if (ArraycopySrcPrefetchDistance > 0) {
+        __ prefetch(from, ArraycopySrcPrefetchDistance, Assembler::severalReads);
+      }
+      if (ArraycopyDstPrefetchDistance > 0) {
+        __ prefetch(to, ArraycopyDstPrefetchDistance, Assembler::severalWritesAndPossiblyReads);
+      }
+    }
+    __ ldx(from, 0, O4);
+    __ ldx(from, 8, G4);
+    __ inc(to, 16);
+    __ inc(from, 16);
+    __ deccc(count, count_dec); // Can we do next iteration after this one?
+    __ srlx(O4, right_shift, G3);
+    __ bset(G3, O3);
+    __ sllx(O4, left_shift,  O4);
+    __ srlx(G4, right_shift, G3);
+    __ bset(G3, O4);
+    if (use_bis) {
+      __ stxa(O3, to, -16);
+      __ stxa(O4, to, -8);
+    } else {
+      __ stx(O3, to, -16);
+      __ stx(O4, to, -8);
+    }
+    __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
+    __ delayed()->sllx(G4, left_shift,  O3);
+  }
 
   // Copy big chunks forward with shift
   //
@@ -1146,64 +1250,51 @@
   //   L_copy_bytes - copy exit label
   //
   void copy_16_bytes_forward_with_shift(Register from, Register to,
-                     Register count, int count_dec, Label& L_copy_bytes) {
-    Label L_loop, L_aligned_copy, L_copy_last_bytes;
+                     Register count, int log2_elem_size, Label& L_copy_bytes) {
+    Label L_aligned_copy, L_copy_last_bytes;
+    assert(log2_elem_size <= 3, "the following code should be changed");
+    int count_dec = 16>>log2_elem_size;
 
     // if both arrays have the same alignment mod 8, do 8 bytes aligned copy
-      __ andcc(from, 7, G1); // misaligned bytes
-      __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
-      __ delayed()->nop();
+    __ andcc(from, 7, G1); // misaligned bytes
+    __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
+    __ delayed()->nop();
 
     const Register left_shift  = G1; // left  shift bit counter
     const Register right_shift = G5; // right shift bit counter
 
-      __ sll(G1, LogBitsPerByte, left_shift);
-      __ mov(64, right_shift);
-      __ sub(right_shift, left_shift, right_shift);
+    __ sll(G1, LogBitsPerByte, left_shift);
+    __ mov(64, right_shift);
+    __ sub(right_shift, left_shift, right_shift);
 
     //
     // Load 2 aligned 8-bytes chunks and use one from previous iteration
     // to form 2 aligned 8-bytes chunks to store.
     //
-      __ deccc(count, count_dec); // Pre-decrement 'count'
-      __ andn(from, 7, from);     // Align address
-      __ ldx(from, 0, O3);
-      __ inc(from, 8);
-      __ align(OptoLoopAlignment);
-    __ BIND(L_loop);
-      __ ldx(from, 0, O4);
-      __ deccc(count, count_dec); // Can we do next iteration after this one?
-      __ ldx(from, 8, G4);
-      __ inc(to, 16);
-      __ inc(from, 16);
-      __ sllx(O3, left_shift,  O3);
-      __ srlx(O4, right_shift, G3);
-      __ bset(G3, O3);
-      __ stx(O3, to, -16);
-      __ sllx(O4, left_shift,  O4);
-      __ srlx(G4, right_shift, G3);
-      __ bset(G3, O4);
-      __ stx(O4, to, -8);
-      __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
-      __ delayed()->mov(G4, O3);
-
-      __ inccc(count, count_dec>>1 ); // + 8 bytes
-      __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
-      __ delayed()->inc(count, count_dec>>1); // restore 'count'
-
-      // copy 8 bytes, part of them already loaded in O3
-      __ ldx(from, 0, O4);
-      __ inc(to, 8);
-      __ inc(from, 8);
-      __ sllx(O3, left_shift,  O3);
-      __ srlx(O4, right_shift, G3);
-      __ bset(O3, G3);
-      __ stx(G3, to, -8);
+    __ dec(count, count_dec);   // Pre-decrement 'count'
+    __ andn(from, 7, from);     // Align address
+    __ ldx(from, 0, O3);
+    __ inc(from, 8);
+    __ sllx(O3, left_shift,  O3);
+
+    disjoint_copy_core(from, to, count, log2_elem_size, 16, &StubGenerator::copy_16_bytes_shift_loop);
+
+    __ inccc(count, count_dec>>1 ); // + 8 bytes
+    __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
+    __ delayed()->inc(count, count_dec>>1); // restore 'count'
+
+    // copy 8 bytes, part of them already loaded in O3
+    __ ldx(from, 0, O4);
+    __ inc(to, 8);
+    __ inc(from, 8);
+    __ srlx(O4, right_shift, G3);
+    __ bset(O3, G3);
+    __ stx(G3, to, -8);
 
     __ BIND(L_copy_last_bytes);
-      __ srl(right_shift, LogBitsPerByte, right_shift); // misaligned bytes
-      __ br(Assembler::always, false, Assembler::pt, L_copy_bytes);
-      __ delayed()->sub(from, right_shift, from);       // restore address
+    __ srl(right_shift, LogBitsPerByte, right_shift); // misaligned bytes
+    __ br(Assembler::always, false, Assembler::pt, L_copy_bytes);
+    __ delayed()->sub(from, right_shift, from);       // restore address
 
     __ BIND(L_aligned_copy);
   }
@@ -1359,7 +1450,7 @@
       // The compare above (count >= 23) guarantes 'count' >= 16 bytes.
       // Also jump over aligned copy after the copy with shift completed.
 
-      copy_16_bytes_forward_with_shift(from, to, count, 16, L_copy_byte);
+      copy_16_bytes_forward_with_shift(from, to, count, 0, L_copy_byte);
     }
 
     // Both array are 8 bytes aligned, copy 16 bytes at a time
@@ -1370,8 +1461,7 @@
 
     // copy tailing bytes
     __ BIND(L_copy_byte);
-      __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
-      __ delayed()->nop();
+      __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
       __ align(OptoLoopAlignment);
     __ BIND(L_copy_byte_loop);
       __ ldub(from, offset, O3);
@@ -1482,8 +1572,7 @@
 
     // copy 1 element (2 bytes) at a time
     __ BIND(L_copy_byte);
-      __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
-      __ delayed()->nop();
+      __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
       __ align(OptoLoopAlignment);
     __ BIND(L_copy_byte_loop);
       __ dec(end_from);
@@ -1589,7 +1678,7 @@
       // The compare above (count >= 11) guarantes 'count' >= 16 bytes.
       // Also jump over aligned copy after the copy with shift completed.
 
-      copy_16_bytes_forward_with_shift(from, to, count, 8, L_copy_2_bytes);
+      copy_16_bytes_forward_with_shift(from, to, count, 1, L_copy_2_bytes);
     }
 
     // Both array are 8 bytes aligned, copy 16 bytes at a time
@@ -1600,8 +1689,7 @@
 
     // copy 1 element at a time
     __ BIND(L_copy_2_bytes);
-      __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
-      __ delayed()->nop();
+      __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
       __ align(OptoLoopAlignment);
     __ BIND(L_copy_2_bytes_loop);
       __ lduh(from, offset, O3);
@@ -1946,8 +2034,7 @@
 
     // copy 1 element (2 bytes) at a time
     __ BIND(L_copy_2_bytes);
-      __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
-      __ delayed()->nop();
+      __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
     __ BIND(L_copy_2_bytes_loop);
       __ dec(end_from, 2);
       __ dec(end_to, 2);
@@ -1965,6 +2052,45 @@
   }
 
   //
+  // Helper methods for generate_disjoint_int_copy_core()
+  //
+  void copy_16_bytes_loop(Register from, Register to, Register count, int count_dec,
+                          Label& L_loop, bool use_prefetch, bool use_bis) {
+
+    __ align(OptoLoopAlignment);
+    __ BIND(L_loop);
+    if (use_prefetch) {
+      if (ArraycopySrcPrefetchDistance > 0) {
+        __ prefetch(from, ArraycopySrcPrefetchDistance, Assembler::severalReads);
+      }
+      if (ArraycopyDstPrefetchDistance > 0) {
+        __ prefetch(to, ArraycopyDstPrefetchDistance, Assembler::severalWritesAndPossiblyReads);
+      }
+    }
+    __ ldx(from, 4, O4);
+    __ ldx(from, 12, G4);
+    __ inc(to, 16);
+    __ inc(from, 16);
+    __ deccc(count, 4); // Can we do next iteration after this one?
+
+    __ srlx(O4, 32, G3);
+    __ bset(G3, O3);
+    __ sllx(O4, 32, O4);
+    __ srlx(G4, 32, G3);
+    __ bset(G3, O4);
+    if (use_bis) {
+      __ stxa(O3, to, -16);
+      __ stxa(O4, to, -8);
+    } else {
+      __ stx(O3, to, -16);
+      __ stx(O4, to, -8);
+    }
+    __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
+    __ delayed()->sllx(G4, 32,  O3);
+
+  }
+
+  //
   //  Generate core code for disjoint int copy (and oop copy on 32-bit).
   //  If "aligned" is true, the "from" and "to" addresses are assumed
   //  to be heapword aligned.
@@ -1977,7 +2103,7 @@
   void generate_disjoint_int_copy_core(bool aligned) {
 
     Label L_skip_alignment, L_aligned_copy;
-    Label L_copy_16_bytes,  L_copy_4_bytes, L_copy_4_bytes_loop, L_exit;
+    Label L_copy_4_bytes, L_copy_4_bytes_loop, L_exit;
 
     const Register from      = O0;   // source array address
     const Register to        = O1;   // destination array address
@@ -2028,30 +2154,16 @@
 
     // copy with shift 4 elements (16 bytes) at a time
       __ dec(count, 4);   // The cmp at the beginning guaranty count >= 4
-
-      __ align(OptoLoopAlignment);
-    __ BIND(L_copy_16_bytes);
-      __ ldx(from, 4, O4);
-      __ deccc(count, 4); // Can we do next iteration after this one?
-      __ ldx(from, 12, G4);
-      __ inc(to, 16);
-      __ inc(from, 16);
-      __ sllx(O3, 32, O3);
-      __ srlx(O4, 32, G3);
-      __ bset(G3, O3);
-      __ stx(O3, to, -16);
-      __ sllx(O4, 32, O4);
-      __ srlx(G4, 32, G3);
-      __ bset(G3, O4);
-      __ stx(O4, to, -8);
-      __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
-      __ delayed()->mov(G4, O3);
+      __ sllx(O3, 32,  O3);
+
+      disjoint_copy_core(from, to, count, 2, 16, &StubGenerator::copy_16_bytes_loop);
 
       __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes);
       __ delayed()->inc(count, 4); // restore 'count'
 
     __ BIND(L_aligned_copy);
-    }
+    } // !aligned
+
     // copy 4 elements (16 bytes) at a time
       __ and3(count, 1, G4); // Save
       __ srl(count, 1, count);
@@ -2060,8 +2172,7 @@
 
     // copy 1 element at a time
     __ BIND(L_copy_4_bytes);
-      __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
-      __ delayed()->nop();
+      __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
     __ BIND(L_copy_4_bytes_loop);
       __ ld(from, offset, O3);
       __ deccc(count);
@@ -2193,8 +2304,7 @@
 
     // copy 1 element (4 bytes) at a time
     __ BIND(L_copy_4_bytes);
-      __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
-      __ delayed()->nop();
+      __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
     __ BIND(L_copy_4_bytes_loop);
       __ dec(end_from, 4);
       __ dec(end_to, 4);
@@ -2240,6 +2350,38 @@
   }
 
   //
+  // Helper methods for generate_disjoint_long_copy_core()
+  //
+  void copy_64_bytes_loop(Register from, Register to, Register count, int count_dec,
+                          Label& L_loop, bool use_prefetch, bool use_bis) {
+    __ align(OptoLoopAlignment);
+    __ BIND(L_loop);
+    for (int off = 0; off < 64; off += 16) {
+      if (use_prefetch && (off & 31) == 0) {
+        if (ArraycopySrcPrefetchDistance > 0) {
+          __ prefetch(from, ArraycopySrcPrefetchDistance, Assembler::severalReads);
+        }
+        if (ArraycopyDstPrefetchDistance > 0) {
+          __ prefetch(to, ArraycopyDstPrefetchDistance, Assembler::severalWritesAndPossiblyReads);
+        }
+      }
+      __ ldx(from,  off+0, O4);
+      __ ldx(from,  off+8, O5);
+      if (use_bis) {
+        __ stxa(O4, to,  off+0);
+        __ stxa(O5, to,  off+8);
+      } else {
+        __ stx(O4, to,  off+0);
+        __ stx(O5, to,  off+8);
+      }
+    }
+    __ deccc(count, 8);
+    __ inc(from, 64);
+    __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
+    __ delayed()->inc(to, 64);
+  }
+
+  //
   //  Generate core code for disjoint long copy (and oop copy on 64-bit).
   //  "aligned" is ignored, because we must make the stronger
   //  assumption that both addresses are always 64-bit aligned.
@@ -2278,38 +2420,28 @@
     const Register offset0 = O4;  // element offset
     const Register offset8 = O5;  // next element offset
 
-      __ deccc(count, 2);
-      __ mov(G0, offset0);   // offset from start of arrays (0)
-      __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
-      __ delayed()->add(offset0, 8, offset8);
+    __ deccc(count, 2);
+    __ mov(G0, offset0);   // offset from start of arrays (0)
+    __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
+    __ delayed()->add(offset0, 8, offset8);
 
     // Copy by 64 bytes chunks
-    Label L_copy_64_bytes;
+
     const Register from64 = O3;  // source address
     const Register to64   = G3;  // destination address
-      __ subcc(count, 6, O3);
-      __ brx(Assembler::negative, false, Assembler::pt, L_copy_16_bytes );
-      __ delayed()->mov(to,   to64);
-      // Now we can use O4(offset0), O5(offset8) as temps
-      __ mov(O3, count);
-      __ mov(from, from64);
-
-      __ align(OptoLoopAlignment);
-    __ BIND(L_copy_64_bytes);
-      for( int off = 0; off < 64; off += 16 ) {
-        __ ldx(from64,  off+0, O4);
-        __ ldx(from64,  off+8, O5);
-        __ stx(O4, to64,  off+0);
-        __ stx(O5, to64,  off+8);
-      }
-      __ deccc(count, 8);
-      __ inc(from64, 64);
-      __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_64_bytes);
-      __ delayed()->inc(to64, 64);
+    __ subcc(count, 6, O3);
+    __ brx(Assembler::negative, false, Assembler::pt, L_copy_16_bytes );
+    __ delayed()->mov(to,   to64);
+    // Now we can use O4(offset0), O5(offset8) as temps
+    __ mov(O3, count);
+    // count >= 0 (original count - 8)
+    __ mov(from, from64);
+
+    disjoint_copy_core(from64, to64, count, 3, 64, &StubGenerator::copy_64_bytes_loop);
 
       // Restore O4(offset0), O5(offset8)
       __ sub(from64, from, offset0);
-      __ inccc(count, 6);
+      __ inccc(count, 6); // restore count
       __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
       __ delayed()->add(offset0, 8, offset8);
 
@@ -2576,7 +2708,7 @@
                                      super_klass->after_save(),
                                      L0, L1, L2, L4,
                                      NULL, &L_pop_to_miss);
-    __ ba(false, L_success);
+    __ ba(L_success);
     __ delayed()->restore();
 
     __ bind(L_pop_to_miss);
@@ -2673,8 +2805,7 @@
     // ======== loop entry is here ========
     __ BIND(load_element);
     __ load_heap_oop(O0_from, O5_offset, G3_oop);  // load the oop
-    __ br_null(G3_oop, true, Assembler::pt, store_element);
-    __ delayed()->nop();
+    __ br_null_short(G3_oop, Assembler::pt, store_element);
 
     __ load_klass(G3_oop, G4_klass); // query the object klass
 
@@ -2896,8 +3027,7 @@
     //  assert(src->klass() != NULL);
     BLOCK_COMMENT("assert klasses not null");
     { Label L_a, L_b;
-      __ br_notnull(G3_src_klass, false, Assembler::pt, L_b); // it is broken if klass is NULL
-      __ delayed()->nop();
+      __ br_notnull_short(G3_src_klass, Assembler::pt, L_b); // it is broken if klass is NULL
       __ bind(L_a);
       __ stop("broken null klass");
       __ bind(L_b);
@@ -2937,9 +3067,7 @@
     }
 
     //  if (src->klass() != dst->klass()) return -1;
-    __ cmp(G3_src_klass, G4_dst_klass);
-    __ brx(Assembler::notEqual, false, Assembler::pn, L_failed);
-    __ delayed()->nop();
+    __ cmp_and_brx_short(G3_src_klass, G4_dst_klass, Assembler::notEqual, Assembler::pn, L_failed);
 
     //  if (!src->is_Array()) return -1;
     __ cmp(G5_lh, Klass::_lh_neutral_value); // < 0
@@ -3007,9 +3135,7 @@
     __ delayed()->signx(length, count); // length
 #ifdef ASSERT
     { Label L;
-      __ cmp(G3_elsize, LogBytesPerLong);
-      __ br(Assembler::equal, false, Assembler::pt, L);
-      __ delayed()->nop();
+      __ cmp_and_br_short(G3_elsize, LogBytesPerLong, Assembler::equal, Assembler::pt, L);
       __ stop("must be long copy, but elsize is wrong");
       __ bind(L);
     }
@@ -3092,6 +3218,34 @@
     return start;
   }
 
+  //
+  //  Generate stub for heap zeroing.
+  //  "to" address is aligned to jlong (8 bytes).
+  //
+  // Arguments for generated stub:
+  //      to:    O0
+  //      count: O1 treated as signed (count of HeapWord)
+  //             count could be 0
+  //
+  address generate_zero_aligned_words(const char* name) {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ pc();
+
+    const Register to    = O0;   // source array address
+    const Register count = O1;   // HeapWords count
+    const Register temp  = O2;   // scratch
+
+    Label Ldone;
+    __ sllx(count, LogHeapWordSize, count); // to bytes count
+    // Use BIS for zeroing
+    __ bis_zeroing(to, count, temp, Ldone);
+    __ bind(Ldone);
+    __ retl();
+    __ delayed()->nop();
+    return start;
+  }
+
   void generate_arraycopy_stubs() {
     address entry;
     address entry_jbyte_arraycopy;
@@ -3218,6 +3372,10 @@
     StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
     StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
     StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
+
+    if (UseBlockZeroing) {
+      StubRoutines::_zero_aligned_words = generate_zero_aligned_words("zero_aligned_words");
+    }
   }
 
   void generate_initial() {
@@ -3255,7 +3413,7 @@
     StubRoutines::_throw_WrongMethodTypeException_entry =
       generate_throw_exception("WrongMethodTypeException throw_exception",
                                CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException),
-                               false, G5_method_type, G3_method_handle);
+                               G5_method_type, G3_method_handle);
   }
 
 
@@ -3266,12 +3424,10 @@
     // UseZeroBaseCompressedOops which is defined after heap initialization.
     StubRoutines::Sparc::_partial_subtype_check                = generate_partial_subtype_check();
     // These entry points require SharedInfo::stack0 to be set up in non-core builds
-    StubRoutines::_throw_AbstractMethodError_entry         = generate_throw_exception("AbstractMethodError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError),  false);
-    StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError),  false);
-    StubRoutines::_throw_ArithmeticException_entry         = generate_throw_exception("ArithmeticException throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_ArithmeticException),  true);
-    StubRoutines::_throw_NullPointerException_entry        = generate_throw_exception("NullPointerException throw_exception",         CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException), true);
-    StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
-    StubRoutines::_throw_StackOverflowError_entry          = generate_throw_exception("StackOverflowError throw_exception",           CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError),   false);
+    StubRoutines::_throw_AbstractMethodError_entry         = generate_throw_exception("AbstractMethodError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError));
+    StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));
+    StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));
+    StubRoutines::_throw_StackOverflowError_entry          = generate_throw_exception("StackOverflowError throw_exception",           CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
 
     StubRoutines::_handler_for_unsafe_access_entry =
       generate_handler_for_unsafe_access();
--- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -190,9 +190,7 @@
   const Register size  = G1_scratch;
   if (EnableInvokeDynamic) {
     __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode.
-    __ cmp(G1_scratch, Bytecodes::_invokedynamic);
-    __ br(Assembler::equal, false, Assembler::pn, L_giant_index);
-    __ delayed()->nop();
+    __ cmp_and_br_short(G1_scratch, Bytecodes::_invokedynamic, Assembler::equal, Assembler::pn, L_giant_index);
   }
   __ get_cache_and_index_at_bcp(cache, G1_scratch, 1);
   __ bind(L_got_cache);
@@ -207,8 +205,7 @@
   if (EnableInvokeDynamic) {
     __ bind(L_giant_index);
     __ get_cache_and_index_at_bcp(cache, G1_scratch, 1, sizeof(u4));
-    __ ba(false, L_got_cache);
-    __ delayed()->nop();
+    __ ba_short(L_got_cache);
   }
 
   return entry;
@@ -221,9 +218,7 @@
   { Label L;
     Address exception_addr(G2_thread, Thread::pending_exception_offset());
     __ ld_ptr(exception_addr, Gtemp);  // Load pending exception.
-    __ tst(Gtemp);
-    __ brx(Assembler::equal, false, Assembler::pt, L);
-    __ delayed()->nop();
+    __ br_null_short(Gtemp, Assembler::pt, L);
     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
     __ should_not_reach_here();
     __ bind(L);
@@ -304,8 +299,7 @@
     if (ProfileInterpreter) {
       // If no method data exists, go to profile_continue.
       __ ld_ptr(Lmethod, methodOopDesc::method_data_offset(), G4_scratch);
-      __ br_null(G4_scratch, false, Assembler::pn, no_mdo);
-      __ delayed()->nop();
+      __ br_null_short(G4_scratch, Assembler::pn, no_mdo);
       // Increment counter
       Address mdo_invocation_counter(G4_scratch,
                                      in_bytes(methodDataOopDesc::invocation_counter_offset()) +
@@ -313,8 +307,7 @@
       __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
                                  G3_scratch, Lscratch,
                                  Assembler::zero, overflow);
-      __ ba(false, done);
-      __ delayed()->nop();
+      __ ba_short(done);
     }
 
     // Increment counter in methodOop
@@ -340,9 +333,7 @@
       // Test to see if we should create a method data oop
       AddressLiteral profile_limit((address)&InvocationCounter::InterpreterProfileLimit);
       __ load_contents(profile_limit, G3_scratch);
-      __ cmp(O0, G3_scratch);
-      __ br(Assembler::lessUnsigned, false, Assembler::pn, *profile_method_continue);
-      __ delayed()->nop();
+      __ cmp_and_br_short(O0, G3_scratch, Assembler::lessUnsigned, Assembler::pn, *profile_method_continue);
 
       // if no method data exists, go to profile_method
       __ test_method_data_pointer(*profile_method);
@@ -351,7 +342,7 @@
     AddressLiteral invocation_limit((address)&InvocationCounter::InterpreterInvocationLimit);
     __ load_contents(invocation_limit, G3_scratch);
     __ cmp(O0, G3_scratch);
-    __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);
+    __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); // Far distance
     __ delayed()->nop();
   }
 
@@ -410,19 +401,14 @@
 
   assert_different_registers(Rframe_size, Rscratch, Rscratch2);
 
-  __ set( page_size,   Rscratch );
-  __ cmp( Rframe_size, Rscratch );
-
-  __ br( Assembler::lessEqual, false, Assembler::pt, after_frame_check );
-  __ delayed()->nop();
+  __ set(page_size, Rscratch);
+  __ cmp_and_br_short(Rframe_size, Rscratch, Assembler::lessEqual, Assembler::pt, after_frame_check);
 
   // get the stack base, and in debug, verify it is non-zero
   __ ld_ptr( G2_thread, Thread::stack_base_offset(), Rscratch );
 #ifdef ASSERT
   Label base_not_zero;
-  __ cmp( Rscratch, G0 );
-  __ brx( Assembler::notEqual, false, Assembler::pn, base_not_zero );
-  __ delayed()->nop();
+  __ br_notnull_short(Rscratch, Assembler::pn, base_not_zero);
   __ stop("stack base is zero in generate_stack_overflow_check");
   __ bind(base_not_zero);
 #endif
@@ -432,9 +418,7 @@
   __ ld_ptr( G2_thread, Thread::stack_size_offset(), Rscratch2 );
 #ifdef ASSERT
   Label size_not_zero;
-  __ cmp( Rscratch2, G0 );
-  __ brx( Assembler::notEqual, false, Assembler::pn, size_not_zero );
-  __ delayed()->nop();
+  __ br_notnull_short(Rscratch2, Assembler::pn, size_not_zero);
   __ stop("stack size is zero in generate_stack_overflow_check");
   __ bind(size_not_zero);
 #endif
@@ -450,9 +434,7 @@
 
   // the frame is greater than one page in size, so check against
   // the bottom of the stack
-  __ cmp( SP, Rscratch );
-  __ brx( Assembler::greater, false, Assembler::pt, after_frame_check );
-  __ delayed()->nop();
+  __ cmp_and_brx_short(SP, Rscratch, Assembler::greater, Assembler::pt, after_frame_check);
 
   // Save the return address as the exception pc
   __ st_ptr(O7, saved_exception_pc);
@@ -624,9 +606,7 @@
     // If we need a safepoint check, generate full interpreter entry.
     AddressLiteral sync_state(SafepointSynchronize::address_of_state());
     __ set(sync_state, G3_scratch);
-    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
-    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
-    __ delayed()->nop();
+    __ cmp_and_br_short(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, Assembler::pn, slow_path);
 
     // Code: _return
     __ retl();
@@ -664,14 +644,12 @@
     AddressLiteral sync_state(SafepointSynchronize::address_of_state());
     __ load_contents(sync_state, G3_scratch);
     __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
-    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
-    __ delayed()->nop();
+    __ cmp_and_br_short(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, Assembler::pn, slow_path);
 
     // Check if local 0 != NULL
     __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
-    __ tst(Otos_i);  // check if local 0 == NULL and go the slow path
-    __ brx(Assembler::zero, false, Assembler::pn, slow_path);
-    __ delayed()->nop();
+    // check if local 0 == NULL and go to the slow path
+    __ br_null_short(Otos_i, Assembler::pn, slow_path);
 
 
     // read first instruction word and extract bytecode @ 1 and index @ 2
@@ -697,9 +675,7 @@
     __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::indices_offset(), G1_scratch);
     __ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
     __ and3(G1_scratch, 0xFF, G1_scratch);
-    __ cmp(G1_scratch, Bytecodes::_getfield);
-    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
-    __ delayed()->nop();
+    __ cmp_and_br_short(G1_scratch, Bytecodes::_getfield, Assembler::notEqual, Assembler::pn, slow_path);
 
     // Get the type and return field offset from the constant pool cache
     __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), G1_scratch);
@@ -787,9 +763,8 @@
     // Check if local 0 != NULL
     // If the receiver is null then it is OK to jump to the slow path.
     __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
-    __ tst(Otos_i);  // check if local 0 == NULL and go the slow path
-    __ brx(Assembler::zero, false, Assembler::pn, slow_path);
-    __ delayed()->nop();
+    // check if local 0 == NULL and go to the slow path
+    __ cmp_and_brx_short(Otos_i, 0, Assembler::equal, Assembler::pn, slow_path);
 
 
     // Load the value of the referent field.
@@ -952,9 +927,7 @@
   { Label L;
     Address signature_handler(Lmethod, methodOopDesc::signature_handler_offset());
     __ ld_ptr(signature_handler, G3_scratch);
-    __ tst(G3_scratch);
-    __ brx(Assembler::notZero, false, Assembler::pt, L);
-    __ delayed()->nop();
+    __ br_notnull_short(G3_scratch, Assembler::pt, L);
     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
     __ ld_ptr(signature_handler, G3_scratch);
     __ bind(L);
@@ -1019,9 +992,7 @@
 #ifdef ASSERT
     if (!PrintSignatureHandlers)  // do not dirty the output with this
     { Label L;
-      __ tst(O1);
-      __ brx(Assembler::notZero, false, Assembler::pt, L);
-      __ delayed()->nop();
+      __ br_notnull_short(O1, Assembler::pt, L);
       __ stop("mirror is missing");
       __ bind(L);
     }
@@ -1038,9 +1009,7 @@
 
 #ifdef ASSERT
   { Label L;
-    __ tst(O0);
-    __ brx(Assembler::notZero, false, Assembler::pt, L);
-    __ delayed()->nop();
+    __ br_notnull_short(O0, Assembler::pt, L);
     __ stop("native entry point is missing");
     __ bind(L);
   }
@@ -1079,9 +1048,7 @@
 #ifdef ASSERT
   { Label L;
     __ ld(thread_state, G3_scratch);
-    __ cmp(G3_scratch, _thread_in_Java);
-    __ br(Assembler::equal, false, Assembler::pt, L);
-    __ delayed()->nop();
+    __ cmp_and_br_short(G3_scratch, _thread_in_Java, Assembler::equal, Assembler::pt, L);
     __ stop("Wrong thread state in native stub");
     __ bind(L);
   }
@@ -1134,9 +1101,7 @@
     Label L;
     __ br(Assembler::notEqual, false, Assembler::pn, L);
     __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
-    __ cmp(G3_scratch, 0);
-    __ br(Assembler::equal, false, Assembler::pt, no_block);
-    __ delayed()->nop();
+    __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
     __ bind(L);
 
     // Block.  Save any potential method result value before the operation and
@@ -1185,9 +1150,7 @@
     Label no_oop, store_result;
 
     __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
-    __ cmp(G3_scratch, Lscratch);
-    __ brx(Assembler::notEqual, false, Assembler::pt, no_oop);
-    __ delayed()->nop();
+    __ cmp_and_brx_short(G3_scratch, Lscratch, Assembler::notEqual, Assembler::pt, no_oop);
     __ addcc(G0, O0, O0);
     __ brx(Assembler::notZero, true, Assembler::pt, store_result);     // if result is not NULL:
     __ delayed()->ld_ptr(O0, 0, O0);                                   // unbox it
@@ -1206,9 +1169,7 @@
   { Label L;
     Address exception_addr(G2_thread, Thread::pending_exception_offset());
     __ ld_ptr(exception_addr, Gtemp);
-    __ tst(Gtemp);
-    __ brx(Assembler::equal, false, Assembler::pt, L);
-    __ delayed()->nop();
+    __ br_null_short(Gtemp, Assembler::pt, L);
     // Note: This could be handled more efficiently since we know that the native
     //       method doesn't have an exception handler. We could directly return
     //       to the exception handler for the caller.
@@ -1245,9 +1206,7 @@
 #ifdef ASSERT
   {
     Label ok;
-    __ cmp(I5_savedSP, FP);
-    __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, ok);
-    __ delayed()->nop();
+    __ cmp_and_brx_short(I5_savedSP, FP, Assembler::greaterEqualUnsigned, Assembler::pt, ok);
     __ stop("bad I5_savedSP value");
     __ should_not_reach_here();
     __ bind(ok);
@@ -1429,8 +1388,7 @@
 
       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
       __ set_method_data_pointer_for_bcp();
-      __ ba(false, profile_method_continue);
-      __ delayed()->nop();
+      __ ba_short(profile_method_continue);
     }
 
     // handle invocation counter overflow
@@ -1856,9 +1814,7 @@
     // adapter frames in C2.
     Label caller_not_deoptimized;
     __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), I7);
-    __ tst(O0);
-    __ brx(Assembler::notEqual, false, Assembler::pt, caller_not_deoptimized);
-    __ delayed()->nop();
+    __ br_notnull_short(O0, Assembler::pt, caller_not_deoptimized);
 
     const Register Gtmp1 = G3_scratch;
     const Register Gtmp2 = G1_scratch;
@@ -1992,10 +1948,10 @@
 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
   assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
   Label L;
-  aep = __ pc(); __ push_ptr(); __ ba(false, L); __ delayed()->nop();
-  fep = __ pc(); __ push_f();   __ ba(false, L); __ delayed()->nop();
-  dep = __ pc(); __ push_d();   __ ba(false, L); __ delayed()->nop();
-  lep = __ pc(); __ push_l();   __ ba(false, L); __ delayed()->nop();
+  aep = __ pc(); __ push_ptr(); __ ba_short(L);
+  fep = __ pc(); __ push_f();   __ ba_short(L);
+  dep = __ pc(); __ push_d();   __ ba_short(L);
+  lep = __ pc(); __ push_l();   __ ba_short(L);
   iep = __ pc(); __ push_i();
   bep = cep = sep = iep;                        // there aren't any
   vep = __ pc(); __ bind(L);                    // fall through
--- a/src/cpu/sparc/vm/templateTable_sparc.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/sparc/vm/templateTable_sparc.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -149,39 +149,68 @@
 }
 
 
-void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register Rbyte_code,
-                                   Register Rscratch,
-                                   bool load_bc_into_scratch /*=true*/) {
+void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
+                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
+                                   int byte_no) {
   // With sharing on, may need to test methodOop flag.
-  if (!RewriteBytecodes) return;
-  if (load_bc_into_scratch) __ set(bc, Rbyte_code);
-  Label patch_done;
+  if (!RewriteBytecodes)  return;
+  Label L_patch_done;
+
+  switch (bc) {
+  case Bytecodes::_fast_aputfield:
+  case Bytecodes::_fast_bputfield:
+  case Bytecodes::_fast_cputfield:
+  case Bytecodes::_fast_dputfield:
+  case Bytecodes::_fast_fputfield:
+  case Bytecodes::_fast_iputfield:
+  case Bytecodes::_fast_lputfield:
+  case Bytecodes::_fast_sputfield:
+    {
+      // We skip bytecode quickening for putfield instructions when
+      // the put_code written to the constant pool cache is zero.
+      // This is required so that every execution of this instruction
+      // calls out to InterpreterRuntime::resolve_get_put to do
+      // additional, required work.
+      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
+      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
+      __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
+      __ set(bc, bc_reg);
+      __ cmp_and_br_short(temp_reg, 0, Assembler::equal, Assembler::pn, L_patch_done);  // don't patch
+    }
+    break;
+  default:
+    assert(byte_no == -1, "sanity");
+    if (load_bc_into_bc_reg) {
+      __ set(bc, bc_reg);
+    }
+  }
+
   if (JvmtiExport::can_post_breakpoint()) {
-    Label fast_patch;
-    __ ldub(at_bcp(0), Rscratch);
-    __ cmp(Rscratch, Bytecodes::_breakpoint);
-    __ br(Assembler::notEqual, false, Assembler::pt, fast_patch);
-    __ delayed()->nop();  // don't bother to hoist the stb here
+    Label L_fast_patch;
+    __ ldub(at_bcp(0), temp_reg);
+    __ cmp_and_br_short(temp_reg, Bytecodes::_breakpoint, Assembler::notEqual, Assembler::pt, L_fast_patch);
     // perform the quickening, slowly, in the bowels of the breakpoint table
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, Rbyte_code);
-    __ ba(false, patch_done);
-    __ delayed()->nop();
-    __ bind(fast_patch);
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, bc_reg);
+    __ ba_short(L_patch_done);
+    __ bind(L_fast_patch);
   }
+
 #ifdef ASSERT
   Bytecodes::Code orig_bytecode =  Bytecodes::java_code(bc);
-  Label okay;
-  __ ldub(at_bcp(0), Rscratch);
-  __ cmp(Rscratch, orig_bytecode);
-  __ br(Assembler::equal, false, Assembler::pt, okay);
-  __ delayed() ->cmp(Rscratch, Rbyte_code);
-  __ br(Assembler::equal, false, Assembler::pt, okay);
+  Label L_okay;
+  __ ldub(at_bcp(0), temp_reg);
+  __ cmp(temp_reg, orig_bytecode);
+  __ br(Assembler::equal, false, Assembler::pt, L_okay);
+  __ delayed()->cmp(temp_reg, bc_reg);
+  __ br(Assembler::equal, false, Assembler::pt, L_okay);
   __ delayed()->nop();
-  __ stop("Rewriting wrong bytecode location");
-  __ bind(okay);
+  __ stop("patching the wrong bytecode");
+  __ bind(L_okay);
 #endif
-  __ stb(Rbyte_code, at_bcp(0));
-  __ bind(patch_done);
+
+  // patch bytecode
+  __ stb(bc_reg, at_bcp(0));
+  __ bind(L_patch_done);
 }
 
 //----------------------------------------------------------------------------------------------------
@@ -281,17 +310,14 @@
   // get type from tags
   __ add(O2, tags_offset, O2);
   __ ldub(O2, O1, O2);
-  __ cmp(O2, JVM_CONSTANT_UnresolvedString);    // unresolved string? If so, must resolve
-  __ brx(Assembler::equal, true, Assembler::pt, call_ldc);
-  __ delayed()->nop();
-
-  __ cmp(O2, JVM_CONSTANT_UnresolvedClass);     // unresolved class? If so, must resolve
-  __ brx(Assembler::equal, true, Assembler::pt, call_ldc);
-  __ delayed()->nop();
-
-  __ cmp(O2, JVM_CONSTANT_UnresolvedClassInError);     // unresolved class in error state
-  __ brx(Assembler::equal, true, Assembler::pn, call_ldc);
-  __ delayed()->nop();
+  // unresolved string? If so, must resolve
+  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedString, Assembler::equal, Assembler::pt, call_ldc);
+
+  // unresolved class? If so, must resolve
+  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClass, Assembler::equal, Assembler::pt, call_ldc);
+
+  // unresolved class in error state
+  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClassInError, Assembler::equal, Assembler::pn, call_ldc);
 
   __ cmp(O2, JVM_CONSTANT_Class);      // need to call vm to get java mirror of the class
   __ brx(Assembler::notEqual, true, Assembler::pt, notClass);
@@ -301,8 +327,7 @@
   __ set(wide, O1);
   call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), O1);
   __ push(atos);
-  __ ba(false, exit);
-  __ delayed()->nop();
+  __ ba_short(exit);
 
   __ bind(notClass);
  // __ add(O0, base_offset, O0);
@@ -312,8 +337,7 @@
   __ delayed()->cmp(O2, JVM_CONSTANT_String);
   __ ld(O0, O1, Otos_i);
   __ push(itos);
-  __ ba(false, exit);
-  __ delayed()->nop();
+  __ ba_short(exit);
 
   __ bind(notInt);
  // __ cmp(O2, JVM_CONSTANT_String);
@@ -325,8 +349,7 @@
   __ ld_ptr(O0, O1, Otos_i);
   __ verify_oop(Otos_i);
   __ push(atos);
-  __ ba(false, exit);
-  __ delayed()->nop();
+  __ ba_short(exit);
 
   __ bind(notString);
  // __ ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
@@ -365,9 +388,7 @@
   __ load_klass(Otos_i, Rcon_klass);
   AddressLiteral array_klass_addr((address)Universe::systemObjArrayKlassObj_addr());
   __ load_contents(array_klass_addr, Rarray_klass);
-  __ cmp(Rarray_klass, Rcon_klass);
-  __ brx(Assembler::notEqual, false, Assembler::pt, L_done);
-  __ delayed()->nop();
+  __ cmp_and_brx_short(Rarray_klass, Rcon_klass, Assembler::notEqual, Assembler::pt, L_done);
   __ ld(Address(Otos_i, arrayOopDesc::length_offset_in_bytes()), Rcon_klass);
   __ tst(Rcon_klass);
   __ brx(Assembler::zero, true, Assembler::pt, L_done);
@@ -397,9 +418,7 @@
   __ sll(O1, LogBytesPerWord, O1);
   __ add(O0, O1, G3_scratch);
 
-  __ cmp(O2, JVM_CONSTANT_Double);
-  __ brx(Assembler::notEqual, false, Assembler::pt, Long);
-  __ delayed()->nop();
+  __ cmp_and_brx_short(O2, JVM_CONSTANT_Double, Assembler::notEqual, Assembler::pt, Long);
   // A double can be placed at word-aligned locations in the constant pool.
   // Check out Conversions.java for an example.
   // Also constantPoolOopDesc::header_size() is 20, which makes it very difficult
@@ -413,8 +432,7 @@
          f->successor());
 #endif
   __ push(dtos);
-  __ ba(false, exit);
-  __ delayed()->nop();
+  __ ba_short(exit);
 
   __ bind(Long);
 #ifdef _LP64
@@ -453,9 +471,7 @@
     // last two iloads in a pair.  Comparing against fast_iload means that
     // the next bytecode is neither an iload or a caload, and therefore
     // an iload pair.
-    __ cmp(G3_scratch, (int)Bytecodes::_iload);
-    __ br(Assembler::equal, false, Assembler::pn, done);
-    __ delayed()->nop();
+    __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_iload, Assembler::equal, Assembler::pn, done);
 
     __ cmp(G3_scratch, (int)Bytecodes::_fast_iload);
     __ br(Assembler::equal, false, Assembler::pn, rewrite);
@@ -697,9 +713,7 @@
     aload(0);
 
     // if _getfield then wait with rewrite
-    __ cmp(G3_scratch, (int)Bytecodes::_getfield);
-    __ br(Assembler::equal, false, Assembler::pn, done);
-    __ delayed()->nop();
+    __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_getfield, Assembler::equal, Assembler::pn, done);
 
     // if _igetfield then rewrite to _fast_iaccess_0
     assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
@@ -867,8 +881,7 @@
   __ index_check_without_pop(O3, O2, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O1);
 
   // do array store check - check for NULL value first
-  __ br_null( Otos_i, false, Assembler::pn, is_null );
-  __ delayed()->nop();
+  __ br_null_short( Otos_i, Assembler::pn, is_null );
 
   __ load_klass(O3, O4); // get array klass
   __ load_klass(Otos_i, O5); // get value klass
@@ -899,7 +912,7 @@
   __ bind(store_ok);
   do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, _bs->kind(), true);
 
-  __ ba(false,done);
+  __ ba(done);
   __ delayed()->inc(Lesp, 3* Interpreter::stackElementSize); // adj sp (pops array, index and value)
 
   __ bind(is_null);
@@ -1633,16 +1646,14 @@
       if (ProfileInterpreter) {
         // If no method data exists, go to profile_continue.
         __ ld_ptr(Lmethod, methodOopDesc::method_data_offset(), G4_scratch);
-        __ br_null(G4_scratch, false, Assembler::pn, Lno_mdo);
-        __ delayed()->nop();
+        __ br_null_short(G4_scratch, Assembler::pn, Lno_mdo);
 
         // Increment backedge counter in the MDO
         Address mdo_backedge_counter(G4_scratch, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
                                                  in_bytes(InvocationCounter::counter_offset()));
         __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, Lscratch,
                                    Assembler::notZero, &Lforward);
-        __ ba(false, Loverflow);
-        __ delayed()->nop();
+        __ ba_short(Loverflow);
       }
 
       // If there's no MDO, increment counter in methodOop
@@ -1658,14 +1669,11 @@
 
       // Was an OSR adapter generated?
       // O0 = osr nmethod
-      __ br_null(O0, false, Assembler::pn, Lforward);
-      __ delayed()->nop();
+      __ br_null_short(O0, Assembler::pn, Lforward);
 
       // Has the nmethod been invalidated already?
       __ ld(O0, nmethod::entry_bci_offset(), O2);
-      __ cmp(O2, InvalidOSREntryBci);
-      __ br(Assembler::equal, false, Assembler::pn, Lforward);
-      __ delayed()->nop();
+      __ cmp_and_br_short(O2, InvalidOSREntryBci, Assembler::equal, Assembler::pn, Lforward);
 
       // migrate the interpreter frame off of the stack
 
@@ -1830,7 +1838,7 @@
   __ profile_switch_case(O2, O3, G3_scratch, G4_scratch);
   __ sll(O2, LogBytesPerInt, O2);
   __ add(O2, 3 * BytesPerInt, O2);
-  __ ba(false, continue_execution);
+  __ ba(continue_execution);
   __ delayed()->ld(O1, O2, O2);
   // handle default
   __ bind(default_case);
@@ -1858,7 +1866,7 @@
   __ ld(O1, BytesPerInt, O2);
   __ sll(O2, LogBytesPerInt + 1, O2); // in word-pairs
   __ add(O1, 2 * BytesPerInt, O3); // set first pair addr
-  __ ba(false, loop_entry);
+  __ ba(loop_entry);
   __ delayed()->add(O3, O2, O2); // counter now points past last pair
 
   // table search
@@ -1877,8 +1885,7 @@
   __ ld(O1, 0, O4); // get default offset
   if (ProfileInterpreter) {
     __ profile_switch_default(O3);
-    __ ba(false, continue_execution);
-    __ delayed()->nop();
+    __ ba_short(continue_execution);
   }
 
   // entry found -> get offset
@@ -1944,7 +1951,7 @@
 
   // and start
   Label entry;
-  __ ba(false, entry);
+  __ ba(entry);
   __ delayed()->ld( Rarray, -BytesPerInt, Rj);
   // (Rj is already in the native byte-ordering.)
 
@@ -2002,8 +2009,7 @@
   // (Rj is already in the native byte-ordering.)
 
   if (ProfileInterpreter) {
-    __ ba(false, continue_execution);
-    __ delayed()->nop();
+    __ ba_short(continue_execution);
   }
 
   __ bind(default_case); // fall through (if not profiling)
@@ -2087,12 +2093,12 @@
   // Depends on cpCacheOop layout!
   Label resolved;
 
-  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
   if (byte_no == f1_oop) {
     // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
     // This kind of CP cache entry does not need to match the flags byte, because
     // there is a 1-1 relation between bytecode type and CP entry type.
     assert_different_registers(result, Rcache);
+    __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
     __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
               ConstantPoolCacheEntry::f1_offset(), result);
     __ tst(result);
@@ -2101,15 +2107,9 @@
   } else {
     assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
     assert(result == noreg, "");  //else change code for setting result
-    const int shift_count = (1 + byte_no)*BitsPerByte;
-
-    __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
-              ConstantPoolCacheEntry::indices_offset(), Lbyte_code);
-
-    __ srl(  Lbyte_code, shift_count, Lbyte_code );
-    __ and3( Lbyte_code,        0xFF, Lbyte_code );
-    __ cmp(  Lbyte_code, (int)bytecode());
-    __ br(   Assembler::equal, false, Assembler::pt, resolved);
+    __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, Lbyte_code, byte_no, 1, index_size);
+    __ cmp(Lbyte_code, (int) bytecode());  // have we resolved this bytecode?
+    __ br(Assembler::equal, false, Assembler::pt, resolved);
     __ delayed()->set((int)bytecode(), O1);
   }
 
@@ -2216,9 +2216,7 @@
     assert_different_registers(Rcache, index, G1_scratch);
     AddressLiteral get_field_access_count_addr(JvmtiExport::get_field_access_count_addr());
     __ load_contents(get_field_access_count_addr, G1_scratch);
-    __ tst(G1_scratch);
-    __ br(Assembler::zero, false, Assembler::pt, Label1);
-    __ delayed()->nop();
+    __ cmp_and_br_short(G1_scratch, 0, Assembler::equal, Assembler::pt, Label1);
 
     __ add(Rcache, in_bytes(cp_base_offset), Rcache);
 
@@ -2298,7 +2296,7 @@
   if (!is_static) {
     patch_bytecode(Bytecodes::_fast_agetfield, G3_scratch, G4_scratch);
   }
-  __ ba(false, checkVolatile);
+  __ ba(checkVolatile);
   __ delayed()->tst(Lscratch);
 
   __ bind(notObj);
@@ -2313,7 +2311,7 @@
   if (!is_static) {
     patch_bytecode(Bytecodes::_fast_igetfield, G3_scratch, G4_scratch);
   }
-  __ ba(false, checkVolatile);
+  __ ba(checkVolatile);
   __ delayed()->tst(Lscratch);
 
   __ bind(notInt);
@@ -2329,7 +2327,7 @@
   if (!is_static) {
     patch_bytecode(Bytecodes::_fast_lgetfield, G3_scratch, G4_scratch);
   }
-  __ ba(false, checkVolatile);
+  __ ba(checkVolatile);
   __ delayed()->tst(Lscratch);
 
   __ bind(notLong);
@@ -2344,7 +2342,7 @@
   if (!is_static) {
     patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch);
   }
-  __ ba(false, checkVolatile);
+  __ ba(checkVolatile);
   __ delayed()->tst(Lscratch);
 
   __ bind(notByte);
@@ -2359,7 +2357,7 @@
   if (!is_static) {
     patch_bytecode(Bytecodes::_fast_cgetfield, G3_scratch, G4_scratch);
   }
-  __ ba(false, checkVolatile);
+  __ ba(checkVolatile);
   __ delayed()->tst(Lscratch);
 
   __ bind(notChar);
@@ -2374,7 +2372,7 @@
   if (!is_static) {
     patch_bytecode(Bytecodes::_fast_sgetfield, G3_scratch, G4_scratch);
   }
-  __ ba(false, checkVolatile);
+  __ ba(checkVolatile);
   __ delayed()->tst(Lscratch);
 
   __ bind(notShort);
@@ -2390,7 +2388,7 @@
   if (!is_static) {
     patch_bytecode(Bytecodes::_fast_fgetfield, G3_scratch, G4_scratch);
   }
-  __ ba(false, checkVolatile);
+  __ ba(checkVolatile);
   __ delayed()->tst(Lscratch);
 
   __ bind(notFloat);
@@ -2499,9 +2497,7 @@
     Label done;
     AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
     __ load_contents(get_field_modification_count_addr, G4_scratch);
-    __ tst(G4_scratch);
-    __ br(Assembler::zero, false, Assembler::pt, done);
-    __ delayed()->nop();
+    __ cmp_and_br_short(G4_scratch, 0, Assembler::equal, Assembler::pt, done);
     __ pop_ptr(G4_scratch);     // copy the object pointer from tos
     __ verify_oop(G4_scratch);
     __ push_ptr(G4_scratch);    // put the object pointer back on tos
@@ -2552,9 +2548,7 @@
     assert_different_registers(Rcache, index, G1_scratch);
     AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
     __ load_contents(get_field_modification_count_addr, G1_scratch);
-    __ tst(G1_scratch);
-    __ br(Assembler::zero, false, Assembler::pt, Label1);
-    __ delayed()->nop();
+    __ cmp_and_br_short(G1_scratch, 0, Assembler::zero, Assembler::pt, Label1);
 
     // The Rcache and index registers have been already set.
     // This allows to eliminate this call but the Rcache and index
@@ -2584,8 +2578,7 @@
       __ br(Assembler::equal, false, Assembler::pt, two_word);
       __ delayed()->nop();
       __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(1));
-      __ br(Assembler::always, false, Assembler::pt, valsizeknown);
-      __ delayed()->nop();
+      __ ba_short(valsizeknown);
       __ bind(two_word);
 
       __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(2));
@@ -2636,9 +2629,7 @@
     __ and3(Rflags, Lscratch, Lscratch);
 
     if (__ membar_has_effect(read_bits)) {
-      __ tst(Lscratch);
-      __ br(Assembler::zero, false, Assembler::pt, notVolatile);
-      __ delayed()->nop();
+      __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
       volatile_barrier(read_bits);
       __ bind(notVolatile);
     }
@@ -2653,150 +2644,162 @@
 
   if (is_static) {
     // putstatic with object type most likely, check that first
-    __ cmp(Rflags, atos );
+    __ cmp(Rflags, atos);
     __ br(Assembler::notEqual, false, Assembler::pt, notObj);
-    __ delayed() ->cmp(Rflags, itos );
+    __ delayed()->cmp(Rflags, itos);
 
     // atos
-    __ pop_ptr();
-    __ verify_oop(Otos_i);
-
-    do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
-
-    __ ba(false, checkVolatile);
-    __ delayed()->tst(Lscratch);
+    {
+      __ pop_ptr();
+      __ verify_oop(Otos_i);
+      do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
+      __ ba(checkVolatile);
+      __ delayed()->tst(Lscratch);
+    }
 
     __ bind(notObj);
-
-    // cmp(Rflags, itos );
+    // cmp(Rflags, itos);
     __ br(Assembler::notEqual, false, Assembler::pt, notInt);
-    __ delayed() ->cmp(Rflags, btos );
+    __ delayed()->cmp(Rflags, btos);
 
     // itos
-    __ pop_i();
-    __ st(Otos_i, Rclass, Roffset);
-    __ ba(false, checkVolatile);
-    __ delayed()->tst(Lscratch);
+    {
+      __ pop_i();
+      __ st(Otos_i, Rclass, Roffset);
+      __ ba(checkVolatile);
+      __ delayed()->tst(Lscratch);
+    }
 
     __ bind(notInt);
-
   } else {
     // putfield with int type most likely, check that first
-    __ cmp(Rflags, itos );
+    __ cmp(Rflags, itos);
     __ br(Assembler::notEqual, false, Assembler::pt, notInt);
-    __ delayed() ->cmp(Rflags, atos );
+    __ delayed()->cmp(Rflags, atos);
 
     // itos
-    __ pop_i();
-    pop_and_check_object(Rclass);
-    __ st(Otos_i, Rclass, Roffset);
-    patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch);
-    __ ba(false, checkVolatile);
-    __ delayed()->tst(Lscratch);
+    {
+      __ pop_i();
+      pop_and_check_object(Rclass);
+      __ st(Otos_i, Rclass, Roffset);
+      patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch, true, byte_no);
+      __ ba(checkVolatile);
+      __ delayed()->tst(Lscratch);
+    }
 
     __ bind(notInt);
-    // cmp(Rflags, atos );
+    // cmp(Rflags, atos);
     __ br(Assembler::notEqual, false, Assembler::pt, notObj);
-    __ delayed() ->cmp(Rflags, btos );
+    __ delayed()->cmp(Rflags, btos);
 
     // atos
-    __ pop_ptr();
-    pop_and_check_object(Rclass);
-    __ verify_oop(Otos_i);
-
-    do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
-
-    patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch);
-    __ ba(false, checkVolatile);
-    __ delayed()->tst(Lscratch);
+    {
+      __ pop_ptr();
+      pop_and_check_object(Rclass);
+      __ verify_oop(Otos_i);
+      do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
+      patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch, true, byte_no);
+      __ ba(checkVolatile);
+      __ delayed()->tst(Lscratch);
+    }
 
     __ bind(notObj);
   }
 
-  // cmp(Rflags, btos );
+  // cmp(Rflags, btos);
   __ br(Assembler::notEqual, false, Assembler::pt, notByte);
-  __ delayed() ->cmp(Rflags, ltos );
+  __ delayed()->cmp(Rflags, ltos);
 
   // btos
-  __ pop_i();
-  if (!is_static) pop_and_check_object(Rclass);
-  __ stb(Otos_i, Rclass, Roffset);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch);
+  {
+    __ pop_i();
+    if (!is_static) pop_and_check_object(Rclass);
+    __ stb(Otos_i, Rclass, Roffset);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch, true, byte_no);
+    }
+    __ ba(checkVolatile);
+    __ delayed()->tst(Lscratch);
   }
-  __ ba(false, checkVolatile);
-  __ delayed()->tst(Lscratch);
 
   __ bind(notByte);
-
-  // cmp(Rflags, ltos );
+  // cmp(Rflags, ltos);
   __ br(Assembler::notEqual, false, Assembler::pt, notLong);
-  __ delayed() ->cmp(Rflags, ctos );
+  __ delayed()->cmp(Rflags, ctos);
 
   // ltos
-  __ pop_l();
-  if (!is_static) pop_and_check_object(Rclass);
-  __ st_long(Otos_l, Rclass, Roffset);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch);
+  {
+    __ pop_l();
+    if (!is_static) pop_and_check_object(Rclass);
+    __ st_long(Otos_l, Rclass, Roffset);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch, true, byte_no);
+    }
+    __ ba(checkVolatile);
+    __ delayed()->tst(Lscratch);
   }
-  __ ba(false, checkVolatile);
-  __ delayed()->tst(Lscratch);
 
   __ bind(notLong);
-
-  // cmp(Rflags, ctos );
+  // cmp(Rflags, ctos);
   __ br(Assembler::notEqual, false, Assembler::pt, notChar);
-  __ delayed() ->cmp(Rflags, stos );
+  __ delayed()->cmp(Rflags, stos);
 
   // ctos (char)
-  __ pop_i();
-  if (!is_static) pop_and_check_object(Rclass);
-  __ sth(Otos_i, Rclass, Roffset);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch);
+  {
+    __ pop_i();
+    if (!is_static) pop_and_check_object(Rclass);
+    __ sth(Otos_i, Rclass, Roffset);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch, true, byte_no);
+    }
+    __ ba(checkVolatile);
+    __ delayed()->tst(Lscratch);
   }
-  __ ba(false, checkVolatile);
-  __ delayed()->tst(Lscratch);
 
   __ bind(notChar);
-  // cmp(Rflags, stos );
+  // cmp(Rflags, stos);
   __ br(Assembler::notEqual, false, Assembler::pt, notShort);
-  __ delayed() ->cmp(Rflags, ftos );
-
-  // stos (char)
-  __ pop_i();
-  if (!is_static) pop_and_check_object(Rclass);
-  __ sth(Otos_i, Rclass, Roffset);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch);
+  __ delayed()->cmp(Rflags, ftos);
+
+  // stos (short)
+  {
+    __ pop_i();
+    if (!is_static) pop_and_check_object(Rclass);
+    __ sth(Otos_i, Rclass, Roffset);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch, true, byte_no);
+    }
+    __ ba(checkVolatile);
+    __ delayed()->tst(Lscratch);
   }
-  __ ba(false, checkVolatile);
-  __ delayed()->tst(Lscratch);
 
   __ bind(notShort);
-  // cmp(Rflags, ftos );
+  // cmp(Rflags, ftos);
   __ br(Assembler::notZero, false, Assembler::pt, notFloat);
   __ delayed()->nop();
 
   // ftos
-  __ pop_f();
-  if (!is_static) pop_and_check_object(Rclass);
-  __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch);
+  {
+    __ pop_f();
+    if (!is_static) pop_and_check_object(Rclass);
+    __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch, true, byte_no);
+    }
+    __ ba(checkVolatile);
+    __ delayed()->tst(Lscratch);
   }
-  __ ba(false, checkVolatile);
-  __ delayed()->tst(Lscratch);
 
   __ bind(notFloat);
 
   // dtos
-  __ pop_d();
-  if (!is_static) pop_and_check_object(Rclass);
-  __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch);
+  {
+    __ pop_d();
+    if (!is_static) pop_and_check_object(Rclass);
+    __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch, true, byte_no);
+    }
   }
 
   __ bind(checkVolatile);
@@ -2833,9 +2836,7 @@
     __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
     __ and3(Rflags, Lscratch, Lscratch);
     if (__ membar_has_effect(read_bits)) {
-      __ tst(Lscratch);
-      __ br(Assembler::zero, false, Assembler::pt, notVolatile);
-      __ delayed()->nop();
+      __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
       volatile_barrier(read_bits);
       __ bind(notVolatile);
     }
@@ -2864,9 +2865,7 @@
   }
 
   if (__ membar_has_effect(write_bits)) {
-    __ tst(Lscratch);
-    __ br(Assembler::zero, false, Assembler::pt, exit);
-    __ delayed()->nop();
+    __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, exit);
     volatile_barrier(Assembler::StoreLoad);
     __ bind(exit);
   }
@@ -3226,8 +3225,7 @@
     // the VM should throw IncompatibleClassChangeError.  linkResolver checks
     // this too but that's only if the entry isn't already resolved, so we
     // need to check again.
-    __ br_notnull( Rtemp, false, Assembler::pt, ok);
-    __ delayed()->nop();
+    __ br_notnull_short( Rtemp, Assembler::pt, ok);
     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
     __ should_not_reach_here();
     __ bind(ok);
@@ -3251,9 +3249,7 @@
   // Check for abstract method error.
   {
     Label ok;
-    __ tst(G5_method);
-    __ brx(Assembler::notZero, false, Assembler::pt, ok);
-    __ delayed()->nop();
+    __ br_notnull_short(G5_method, Assembler::pt, ok);
     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
     __ should_not_reach_here();
     __ bind(ok);
@@ -3378,7 +3374,7 @@
 
   if(UseTLAB) {
     Register RoldTopValue = RallocatedObject;
-    Register RtopAddr = G3_scratch, RtlabWasteLimitValue = G3_scratch;
+    Register RtlabWasteLimitValue = G3_scratch;
     Register RnewTopValue = G1_scratch;
     Register RendValue = Rscratch;
     Register RfreeValue = RnewTopValue;
@@ -3408,17 +3404,14 @@
 #else
       __ srl(RfreeValue, LogHeapWordSize, RfreeValue);
 #endif
-      __ cmp(RtlabWasteLimitValue, RfreeValue);
-      __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, slow_case); // tlab waste is small
-      __ delayed()->nop();
+      __ cmp_and_brx_short(RtlabWasteLimitValue, RfreeValue, Assembler::greaterEqualUnsigned, Assembler::pt, slow_case); // tlab waste is small
 
       // increment waste limit to prevent getting stuck on this slow path
       __ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue);
       __ st_ptr(RtlabWasteLimitValue, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
     } else {
       // No allocation in the shared eden.
-      __ br(Assembler::always, false, Assembler::pt, slow_case);
-      __ delayed()->nop();
+      __ ba_short(slow_case);
     }
   }
 
@@ -3440,18 +3433,14 @@
 
     // RnewTopValue contains the top address after the new object
     // has been allocated.
-    __ cmp(RnewTopValue, RendValue);
-    __ brx(Assembler::greaterUnsigned, false, Assembler::pn, slow_case);
-    __ delayed()->nop();
+    __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);
 
     __ casx_under_lock(RtopAddr, RoldTopValue, RnewTopValue,
       VM_Version::v9_instructions_work() ? NULL :
       (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
 
     // if someone beat us on the allocation, try again, otherwise continue
-    __ cmp(RoldTopValue, RnewTopValue);
-    __ brx(Assembler::notEqual, false, Assembler::pn, retry);
-    __ delayed()->nop();
+    __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);
 
     // bump total bytes allocated by this thread
     // RoldTopValue and RtopAddr are dead, so can use G1 and G3
@@ -3466,7 +3455,11 @@
     __ delayed()->add(RallocatedObject, sizeof(oopDesc), G3_scratch);
 
     // initialize remaining object fields
-    { Label loop;
+    if (UseBlockZeroing) {
+      // Use BIS for zeroing
+      __ bis_zeroing(G3_scratch, Roffset, G1_scratch, initialize_header);
+    } else {
+      Label loop;
       __ subcc(Roffset, wordSize, Roffset);
       __ bind(loop);
       //__ subcc(Roffset, wordSize, Roffset);      // executed above loop or in delay slot
@@ -3474,8 +3467,7 @@
       __ br(Assembler::notEqual, false, Assembler::pt, loop);
       __ delayed()->subcc(Roffset, wordSize, Roffset);
     }
-    __ br(Assembler::always, false, Assembler::pt, initialize_header);
-    __ delayed()->nop();
+    __ ba_short(initialize_header);
   }
 
   // slow case
@@ -3485,8 +3477,7 @@
 
   call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), O1, O2);
 
-  __ ba(false, done);
-  __ delayed()->nop();
+  __ ba_short(done);
 
   // Initialize the header: mark, klass
   __ bind(initialize_header);
@@ -3550,8 +3541,7 @@
   Register RspecifiedKlass = O4;
 
   // Check for casting a NULL
-  __ br_null(Otos_i, false, Assembler::pn, is_null);
-  __ delayed()->nop();
+  __ br_null_short(Otos_i, Assembler::pn, is_null);
 
   // Get value klass in RobjKlass
   __ load_klass(Otos_i, RobjKlass); // get value klass
@@ -3571,8 +3561,7 @@
   call_VM(RspecifiedKlass, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
   __ pop_ptr(Otos_i, G3_scratch); // restore receiver
 
-  __ br(Assembler::always, false, Assembler::pt, resolved);
-  __ delayed()->nop();
+  __ ba_short(resolved);
 
   // Extract target class from constant pool
   __ bind(quicked);
@@ -3591,8 +3580,7 @@
   __ bind(cast_ok);
 
   if (ProfileInterpreter) {
-    __ ba(false, done);
-    __ delayed()->nop();
+    __ ba_short(done);
   }
   __ bind(is_null);
   __ profile_null_seen(G3_scratch);
@@ -3608,8 +3596,7 @@
   Register RspecifiedKlass = O4;
 
   // Check for casting a NULL
-  __ br_null(Otos_i, false, Assembler::pt, is_null);
-  __ delayed()->nop();
+  __ br_null_short(Otos_i, Assembler::pt, is_null);
 
   // Get value klass in RobjKlass
   __ load_klass(Otos_i, RobjKlass); // get value klass
@@ -3629,9 +3616,7 @@
   call_VM(RspecifiedKlass, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
   __ pop_ptr(Otos_i, G3_scratch); // restore receiver
 
-  __ br(Assembler::always, false, Assembler::pt, resolved);
-  __ delayed()->nop();
-
+  __ ba_short(resolved);
 
   // Extract target class from constant pool
   __ bind(quicked);
@@ -3649,8 +3634,7 @@
   __ clr( Otos_i );
 
   if (ProfileInterpreter) {
-    __ ba(false, done);
-    __ delayed()->nop();
+    __ ba_short(done);
   }
   __ bind(is_null);
   __ profile_null_seen(G3_scratch);
@@ -3724,7 +3708,7 @@
   {
     Label entry, loop, exit;
     __ add( __ top_most_monitor(), O2 ); // last one to check
-    __ ba( false, entry );
+    __ ba( entry );
     __ delayed()->mov( Lmonitors, O3 ); // first one to check
 
 
@@ -3757,8 +3741,7 @@
   { Label allocated;
 
     // found free slot?
-    __ br_notnull(O1, false, Assembler::pn, allocated);
-    __ delayed()->nop();
+    __ br_notnull_short(O1, Assembler::pn, allocated);
 
     __ add_monitor_to_stack( false, O2, O3 );
     __ mov(Lmonitors, O1);
@@ -3791,7 +3774,7 @@
 
   { Label entry, loop, found;
     __ add( __ top_most_monitor(), O2 ); // last one to check
-    __ ba(false, entry );
+    __ ba(entry);
     // use Lscratch to hold monitor elem to check, start with most recent monitor,
     // By using a local it survives the call to the C routine.
     __ delayed()->mov( Lmonitors, Lscratch );
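
The templateTable_sparc.cpp hunks above all apply one pattern: a three-instruction "cmp; br<cc>; delayed()->nop()" (or "ba; nop") sequence becomes a single cmp_and_br_short / br_*_short / ba_short call, which can use T4's fused compare-and-branch (cbcond) when the target label is close enough for its short displacement, and which drops the useless nop in the delay slot either way. A standalone sketch of the reachability question such helpers turn on; the 10-bit signed word displacement is an assumed figure for illustration, not taken from this patch:

    // Sketch only, not HotSpot MacroAssembler code.
    #include <cstdint>
    #include <cstdlib>

    static bool fits_short_branch(intptr_t branch_pc, intptr_t target_pc) {
      intptr_t disp_words = (target_pc - branch_pc) / 4;  // SPARC instructions are 4 bytes
      return disp_words >= -512 && disp_words < 512;      // assumed 10-bit signed field
    }

    int main() {
      // A target 100 instructions back is reachable; one 4096 instructions ahead is not.
      bool ok = fits_short_branch(0x1000, 0x1000 - 100 * 4) &&
               !fits_short_branch(0x1000, 0x1000 + 4096 * 4);
      return ok ? EXIT_SUCCESS : EXIT_FAILURE;
    }
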
--- a/src/cpu/sparc/vm/vm_version_sparc.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/sparc/vm/vm_version_sparc.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -44,24 +44,58 @@
   PrefetchScanIntervalInBytes = prefetch_scan_interval_in_bytes();
   PrefetchFieldsAhead         = prefetch_fields_ahead();
 
+  assert(0 <= AllocatePrefetchInstr && AllocatePrefetchInstr <= 1, "invalid value");
+  if( AllocatePrefetchInstr < 0 ) AllocatePrefetchInstr = 0;
+  if( AllocatePrefetchInstr > 1 ) AllocatePrefetchInstr = 0;
+
   // Allocation prefetch settings
-  intx cache_line_size = L1_data_cache_line_size();
+  intx cache_line_size = prefetch_data_size();
   if( cache_line_size > AllocatePrefetchStepSize )
     AllocatePrefetchStepSize = cache_line_size;
-  if( FLAG_IS_DEFAULT(AllocatePrefetchLines) )
-    AllocatePrefetchLines = 3; // Optimistic value
-  assert( AllocatePrefetchLines > 0, "invalid value");
-  if( AllocatePrefetchLines < 1 ) // set valid value in product VM
-    AllocatePrefetchLines = 1; // Conservative value
+
+  assert(AllocatePrefetchLines > 0, "invalid value");
+  if( AllocatePrefetchLines < 1 )     // set valid value in product VM
+    AllocatePrefetchLines = 3;
+  assert(AllocateInstancePrefetchLines > 0, "invalid value");
+  if( AllocateInstancePrefetchLines < 1 ) // set valid value in product VM
+    AllocateInstancePrefetchLines = 1;
 
   AllocatePrefetchDistance = allocate_prefetch_distance();
   AllocatePrefetchStyle    = allocate_prefetch_style();
 
-  assert(AllocatePrefetchDistance % AllocatePrefetchStepSize == 0, "invalid value");
+  assert((AllocatePrefetchDistance % AllocatePrefetchStepSize) == 0 &&
+         (AllocatePrefetchDistance > 0), "invalid value");
+  if ((AllocatePrefetchDistance % AllocatePrefetchStepSize) != 0 ||
+      (AllocatePrefetchDistance <= 0)) {
+    AllocatePrefetchDistance = AllocatePrefetchStepSize;
+  }
+
+  if (AllocatePrefetchStyle == 3 && !has_blk_init()) {
+    warning("BIS instructions are not available on this CPU");
+    FLAG_SET_DEFAULT(AllocatePrefetchStyle, 1);
+  }
+
+  if (has_v9()) {
+    assert(ArraycopySrcPrefetchDistance < 4096, "invalid value");
+    if (ArraycopySrcPrefetchDistance >= 4096)
+      ArraycopySrcPrefetchDistance = 4064;
+    assert(ArraycopyDstPrefetchDistance < 4096, "invalid value");
+    if (ArraycopyDstPrefetchDistance >= 4096)
+      ArraycopyDstPrefetchDistance = 4064;
+  } else {
+    if (ArraycopySrcPrefetchDistance > 0) {
+      warning("prefetch instructions are not available on this CPU");
+      FLAG_SET_DEFAULT(ArraycopySrcPrefetchDistance, 0);
+    }
+    if (ArraycopyDstPrefetchDistance > 0) {
+      warning("prefetch instructions are not available on this CPU");
+      FLAG_SET_DEFAULT(ArraycopyDstPrefetchDistance, 0);
+    }
+  }
 
   UseSSE = 0; // Only on x86 and x64
 
-  _supports_cx8               = has_v9();
+  _supports_cx8 = has_v9();
 
   if (is_niagara()) {
     // Indirect branch is the same cost as direct
@@ -94,19 +128,42 @@
       FLAG_SET_DEFAULT(InteriorEntryAlignment, 4);
     }
     if (is_niagara_plus()) {
-      if (has_blk_init() && AllocatePrefetchStyle > 0 &&
-          FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
-        // Use BIS instruction for allocation prefetch.
-        FLAG_SET_DEFAULT(AllocatePrefetchStyle, 3);
+      if (has_blk_init() && UseTLAB &&
+          FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
+        // Use BIS instruction for TLAB allocation prefetch.
+        FLAG_SET_ERGO(intx, AllocatePrefetchInstr, 1);
+        if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
+          FLAG_SET_ERGO(intx, AllocatePrefetchStyle, 3);
+        }
         if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
-          // Use smaller prefetch distance on N2 with BIS
+          // Use smaller prefetch distance with BIS
           FLAG_SET_DEFAULT(AllocatePrefetchDistance, 64);
         }
       }
+      if (is_T4()) {
+        // Double the number of prefetched cache lines on T4
+        // since L2 cache line size is smaller (32 bytes).
+        if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) {
+          FLAG_SET_ERGO(intx, AllocatePrefetchLines, AllocatePrefetchLines*2);
+        }
+        if (FLAG_IS_DEFAULT(AllocateInstancePrefetchLines)) {
+          FLAG_SET_ERGO(intx, AllocateInstancePrefetchLines, AllocateInstancePrefetchLines*2);
+        }
+      }
       if (AllocatePrefetchStyle != 3 && FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
         // Use different prefetch distance without BIS
         FLAG_SET_DEFAULT(AllocatePrefetchDistance, 256);
       }
+      if (AllocatePrefetchInstr == 1) {
+        // Reserve space at the end of the TLAB for BIS, since it
+        // will fault when accessing memory outside of the heap.
+
+        // +1 for rounding up to next cache line, +1 to be safe
+        int lines = AllocatePrefetchLines + 2;
+        int step_size = AllocatePrefetchStepSize;
+        int distance = AllocatePrefetchDistance;
+        _reserve_for_allocation_prefetch = (distance + step_size*lines)/(int)HeapWordSize;
+      }
     }
 #endif
   }
@@ -116,27 +173,69 @@
     if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
       FLAG_SET_DEFAULT(UsePopCountInstruction, true);
     }
+  } else if (UsePopCountInstruction) {
+    warning("POPC instruction is not available on this CPU");
+    FLAG_SET_DEFAULT(UsePopCountInstruction, false);
+  }
+
+  // T4 and newer Sparc cpus have a new compare-and-branch instruction (cbcond).
+  if (has_cbcond()) {
+    if (FLAG_IS_DEFAULT(UseCBCond)) {
+      FLAG_SET_DEFAULT(UseCBCond, true);
+    }
+  } else if (UseCBCond) {
+    warning("CBCOND instruction is not available on this CPU");
+    FLAG_SET_DEFAULT(UseCBCond, false);
+  }
+
+  assert(BlockZeroingLowLimit > 0, "invalid value");
+  if (has_block_zeroing()) {
+    if (FLAG_IS_DEFAULT(UseBlockZeroing)) {
+      FLAG_SET_DEFAULT(UseBlockZeroing, true);
+    }
+  } else if (UseBlockZeroing) {
+    warning("BIS zeroing instructions are not available on this CPU");
+    FLAG_SET_DEFAULT(UseBlockZeroing, false);
+  }
+
+  assert(BlockCopyLowLimit > 0, "invalid value");
+  if (has_block_zeroing()) { // has_blk_init() && is_T4(): core's local L2 cache
+    if (FLAG_IS_DEFAULT(UseBlockCopy)) {
+      FLAG_SET_DEFAULT(UseBlockCopy, true);
+    }
+  } else if (UseBlockCopy) {
+    warning("BIS instructions are not available or expensive on this CPU");
+    FLAG_SET_DEFAULT(UseBlockCopy, false);
   }
 
 #ifdef COMPILER2
+  // T4 and newer Sparc cpus have fast RDPC.
+  if (has_fast_rdpc() && FLAG_IS_DEFAULT(UseRDPCForConstantTableBase)) {
+//    FLAG_SET_DEFAULT(UseRDPCForConstantTableBase, true);
+  }
+
   // Currently not supported anywhere.
   FLAG_SET_DEFAULT(UseFPUForSpilling, false);
+
+  assert((InteriorEntryAlignment % relocInfo::addr_unit()) == 0, "alignment is not a multiple of NOP size");
 #endif
 
+  assert((CodeEntryAlignment % relocInfo::addr_unit()) == 0, "alignment is not a multiple of NOP size");
+  assert((OptoLoopAlignment % relocInfo::addr_unit()) == 0, "alignment is not a multiple of NOP size");
+
   char buf[512];
-  jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
-               (has_v8() ? ", has_v8" : ""),
-               (has_v9() ? ", has_v9" : ""),
+  jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+               (has_v9() ? ", v9" : (has_v8() ? ", v8" : "")),
                (has_hardware_popc() ? ", popc" : ""),
-               (has_vis1() ? ", has_vis1" : ""),
-               (has_vis2() ? ", has_vis2" : ""),
-               (has_vis3() ? ", has_vis3" : ""),
-               (has_blk_init() ? ", has_blk_init" : ""),
-               (is_ultra3() ? ", is_ultra3" : ""),
-               (is_sun4v() ? ", is_sun4v" : ""),
-               (is_niagara() ? ", is_niagara" : ""),
-               (is_niagara_plus() ? ", is_niagara_plus" : ""),
-               (is_sparc64() ? ", is_sparc64" : ""),
+               (has_vis1() ? ", vis1" : ""),
+               (has_vis2() ? ", vis2" : ""),
+               (has_vis3() ? ", vis3" : ""),
+               (has_blk_init() ? ", blk_init" : ""),
+               (has_cbcond() ? ", cbcond" : ""),
+               (is_ultra3() ? ", ultra3" : ""),
+               (is_sun4v() ? ", sun4v" : ""),
+               (is_niagara_plus() ? ", niagara_plus" : (is_niagara() ? ", niagara" : "")),
+               (is_sparc64() ? ", sparc64" : ""),
                (!has_hardware_mul32() ? ", no-mul32" : ""),
                (!has_hardware_div32() ? ", no-div32" : ""),
                (!has_hardware_fsmuld() ? ", no-fsmuld" : ""));
@@ -144,16 +243,34 @@
   // buf is started with ", " or is empty
   _features_str = strdup(strlen(buf) > 2 ? buf + 2 : buf);
 
+  // UseVIS is set to the smallest of what hardware supports and what
+  // the command line requires.  I.e., you cannot set UseVIS to 3 on
+  // older UltraSparc which do not support it.
+  if (UseVIS > 3) UseVIS=3;
+  if (UseVIS < 0) UseVIS=0;
+  if (!has_vis3()) // Drop to 2 if no VIS3 support
+    UseVIS = MIN2((intx)2,UseVIS);
+  if (!has_vis2()) // Drop to 1 if no VIS2 support
+    UseVIS = MIN2((intx)1,UseVIS);
+  if (!has_vis1()) // Drop to 0 if no VIS1 support
+    UseVIS = 0;
+
 #ifndef PRODUCT
   if (PrintMiscellaneous && Verbose) {
-    tty->print("Allocation: ");
+    tty->print("Allocation");
     if (AllocatePrefetchStyle <= 0) {
-      tty->print_cr("no prefetching");
+      tty->print_cr(": no prefetching");
     } else {
+      tty->print(" prefetching: ");
+      if (AllocatePrefetchInstr == 0) {
+          tty->print("PREFETCH");
+      } else if (AllocatePrefetchInstr == 1) {
+          tty->print("BIS");
+      }
       if (AllocatePrefetchLines > 1) {
-        tty->print_cr("PREFETCH %d, %d lines of size %d bytes", AllocatePrefetchDistance, AllocatePrefetchLines, AllocatePrefetchStepSize);
+        tty->print_cr(" at distance %d, %d lines of %d bytes", AllocatePrefetchDistance, AllocatePrefetchLines, AllocatePrefetchStepSize);
       } else {
-        tty->print_cr("PREFETCH %d, one line", AllocatePrefetchDistance);
+        tty->print_cr(" at distance %d, one line of %d bytes", AllocatePrefetchDistance, AllocatePrefetchStepSize);
       }
     }
     if (PrefetchCopyIntervalInBytes > 0) {
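
Worked example of the TLAB reservation computed above when BIS allocation prefetch is selected (AllocatePrefetchInstr == 1). The flag values are assumptions chosen to match the T4 ergonomics in this hunk (32-byte step, 64-byte distance, line count doubled), not authoritative defaults:

    // Illustrative arithmetic only, not VM code.
    #include <cstdio>

    int main() {
      const int HeapWordSize             = 8;   // 64-bit VM
      const int AllocatePrefetchLines    = 6;   // e.g. 3, doubled on T4
      const int AllocatePrefetchStepSize = 32;  // T4 prefetch/L2 line size
      const int AllocatePrefetchDistance = 64;  // smaller distance used with BIS

      int lines     = AllocatePrefetchLines + 2;  // +1 to round up to the next line, +1 to be safe
      int step_size = AllocatePrefetchStepSize;
      int distance  = AllocatePrefetchDistance;
      int reserve   = (distance + step_size * lines) / HeapWordSize;

      // (64 + 32 * 8) / 8 = 40 heap words, i.e. 320 bytes kept free at the end of
      // each TLAB, so a BIS running ahead of the allocation pointer never touches
      // memory outside the heap.
      std::printf("reserve = %d heap words (%d bytes)\n", reserve, reserve * HeapWordSize);
      return 0;
    }
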
--- a/src/cpu/sparc/vm/vm_version_sparc.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/sparc/vm/vm_version_sparc.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -31,44 +31,46 @@
 class VM_Version: public Abstract_VM_Version {
 protected:
   enum Feature_Flag {
-    v8_instructions    = 0,
-    hardware_mul32     = 1,
-    hardware_div32     = 2,
-    hardware_fsmuld    = 3,
-    hardware_popc      = 4,
-    v9_instructions    = 5,
-    vis1_instructions  = 6,
-    vis2_instructions  = 7,
-    sun4v_instructions = 8,
+    v8_instructions      = 0,
+    hardware_mul32       = 1,
+    hardware_div32       = 2,
+    hardware_fsmuld      = 3,
+    hardware_popc        = 4,
+    v9_instructions      = 5,
+    vis1_instructions    = 6,
+    vis2_instructions    = 7,
+    sun4v_instructions   = 8,
     blk_init_instructions = 9,
-    fmaf_instructions  = 10,
-    fmau_instructions  = 11,
-    vis3_instructions  = 12,
-    sparc64_family     = 13,
-    T_family           = 14,
-    T1_model           = 15
+    fmaf_instructions    = 10,
+    fmau_instructions    = 11,
+    vis3_instructions    = 12,
+    sparc64_family       = 13,
+    T_family             = 14,
+    T1_model             = 15,
+    cbcond_instructions  = 16
   };
 
   enum Feature_Flag_Set {
     unknown_m           = 0,
     all_features_m      = -1,
 
-    v8_instructions_m   = 1 << v8_instructions,
-    hardware_mul32_m    = 1 << hardware_mul32,
-    hardware_div32_m    = 1 << hardware_div32,
-    hardware_fsmuld_m   = 1 << hardware_fsmuld,
-    hardware_popc_m     = 1 << hardware_popc,
-    v9_instructions_m   = 1 << v9_instructions,
-    vis1_instructions_m = 1 << vis1_instructions,
-    vis2_instructions_m = 1 << vis2_instructions,
-    sun4v_m             = 1 << sun4v_instructions,
+    v8_instructions_m       = 1 << v8_instructions,
+    hardware_mul32_m        = 1 << hardware_mul32,
+    hardware_div32_m        = 1 << hardware_div32,
+    hardware_fsmuld_m       = 1 << hardware_fsmuld,
+    hardware_popc_m         = 1 << hardware_popc,
+    v9_instructions_m       = 1 << v9_instructions,
+    vis1_instructions_m     = 1 << vis1_instructions,
+    vis2_instructions_m     = 1 << vis2_instructions,
+    sun4v_m                 = 1 << sun4v_instructions,
     blk_init_instructions_m = 1 << blk_init_instructions,
-    fmaf_instructions_m = 1 << fmaf_instructions,
-    fmau_instructions_m = 1 << fmau_instructions,
-    vis3_instructions_m = 1 << vis3_instructions,
-    sparc64_family_m    = 1 << sparc64_family,
-    T_family_m          = 1 << T_family,
-    T1_model_m          = 1 << T1_model,
+    fmaf_instructions_m     = 1 << fmaf_instructions,
+    fmau_instructions_m     = 1 << fmau_instructions,
+    vis3_instructions_m     = 1 << vis3_instructions,
+    sparc64_family_m        = 1 << sparc64_family,
+    T_family_m              = 1 << T_family,
+    T1_model_m              = 1 << T1_model,
+    cbcond_instructions_m   = 1 << cbcond_instructions,
 
     generic_v8_m        = v8_instructions_m | hardware_mul32_m | hardware_div32_m | hardware_fsmuld_m,
     generic_v9_m        = generic_v8_m | v9_instructions_m,
@@ -111,25 +113,35 @@
   static bool has_vis2()                { return (_features & vis2_instructions_m) != 0; }
   static bool has_vis3()                { return (_features & vis3_instructions_m) != 0; }
   static bool has_blk_init()            { return (_features & blk_init_instructions_m) != 0; }
+  static bool has_cbcond()              { return (_features & cbcond_instructions_m) != 0; }
 
   static bool supports_compare_and_exchange()
                                         { return has_v9(); }
 
-  static bool is_ultra3()               { return (_features & ultra3_m) == ultra3_m; }
-  static bool is_sun4v()                { return (_features & sun4v_m) != 0; }
   // Returns true if the platform is in the niagara line (T series)
   // and newer than the niagara1.
   static bool is_niagara_plus()         { return is_T_family(_features) && !is_T1_model(_features); }
+  static bool is_T4()                   { return is_T_family(_features) && has_cbcond(); }
+
   // Fujitsu SPARC64
   static bool is_sparc64()              { return (_features & sparc64_family_m) != 0; }
 
+  static bool is_sun4v()                { return (_features & sun4v_m) != 0; }
+  static bool is_ultra3()               { return (_features & ultra3_m) == ultra3_m && !is_sun4v() && !is_sparc64(); }
+
   static bool has_fast_fxtof()          { return is_niagara() || is_sparc64() || has_v9() && !is_ultra3(); }
   static bool has_fast_idiv()           { return is_niagara_plus() || is_sparc64(); }
 
+  // T4 and newer Sparc have a fast RDPC instruction.
+  static bool has_fast_rdpc()           { return is_T4(); }
+
+  // On T4 and newer Sparc, a BIS to the beginning of a cache line always zeros it.
+  static bool has_block_zeroing()       { return has_blk_init() && is_T4(); }
+
   static const char* cpu_features()     { return _features_str; }
 
-  static intx L1_data_cache_line_size()  {
-    return 64;  // default prefetch block size on sparc
+  static intx prefetch_data_size()  {
+    return is_T4() ? 32 : 64;  // default prefetch block size on sparc
   }
 
   // Prefetch
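
The realigned feature flags above stay simple single-bit masks (cbcond is bit 16), and the UseVIS clamping added to vm_version_sparc.cpp just lowers the requested level to whatever those bits report. A compilable sketch of that policy, reusing the bit positions from the enum; the helper name is invented for the example:

    // Sketch only, not the VM's flag processing.
    #include <algorithm>
    #include <cstdint>

    enum : uint32_t {
      vis1_m = 1u << 6,    // vis1_instructions
      vis2_m = 1u << 7,    // vis2_instructions
      vis3_m = 1u << 12    // vis3_instructions
    };

    static long clamp_use_vis(long requested, uint32_t features) {
      long v = std::min(std::max(requested, 0L), 3L);     // UseVIS stays in [0, 3]
      if ((features & vis3_m) == 0) v = std::min(v, 2L);  // drop to 2 without VIS3
      if ((features & vis2_m) == 0) v = std::min(v, 1L);  // drop to 1 without VIS2
      if ((features & vis1_m) == 0) v = 0;                // drop to 0 without VIS1
      return v;
    }

    int main() {
      // Requesting UseVIS=3 on a CPU that only reports VIS1 and VIS2 yields 2.
      return clamp_use_vis(3, vis1_m | vis2_m) == 2 ? 0 : 1;
    }
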
--- a/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -76,9 +76,7 @@
     Label L;
     // check offset vs vtable length
     __ ld(G3_scratch, instanceKlass::vtable_length_offset()*wordSize, G5);
-    __ cmp(G5, vtable_index*vtableEntry::size());
-    __ br(Assembler::greaterUnsigned, false, Assembler::pt, L);
-    __ delayed()->nop();
+    __ cmp_and_br_short(G5, vtable_index*vtableEntry::size(), Assembler::greaterUnsigned, Assembler::pt, L);
     __ set(vtable_index, O2);
     __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), O0, O2);
     __ bind(L);
@@ -95,8 +93,7 @@
 #ifndef PRODUCT
   if (DebugVtables) {
     Label L;
-    __ br_notnull(G5_method, false, Assembler::pt, L);
-    __ delayed()->nop();
+    __ br_notnull_short(G5_method, Assembler::pt, L);
     __ stop("Vtable entry is ZERO");
     __ bind(L);
   }
@@ -177,8 +174,7 @@
 #ifndef PRODUCT
   if (DebugVtables) {
     Label L01;
-    __ bpr(Assembler::rc_nz, false, Assembler::pt, L5_method, L01);
-    __ delayed()->nop();
+    __ br_notnull_short(L5_method, Assembler::pt, L01);
     __ stop("methodOop is null");
     __ bind(L01);
     __ verify_oop(L5_method);
--- a/src/cpu/x86/vm/assembler_x86.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1339,9 +1339,8 @@
   emit_operand(rax, dst);
 }
 
-void Assembler::jcc(Condition cc, Label& L, relocInfo::relocType rtype) {
-  InstructionMark im(this);
-  relocate(rtype);
+void Assembler::jcc(Condition cc, Label& L, bool maybe_short) {
+  InstructionMark im(this);
   assert((0 <= cc) && (cc < 16), "illegal cc");
   if (L.is_bound()) {
     address dst = target(L);
@@ -1350,7 +1349,7 @@
     const int short_size = 2;
     const int long_size = 6;
     intptr_t offs = (intptr_t)dst - (intptr_t)_code_pos;
-    if (rtype == relocInfo::none && is8bit(offs - short_size)) {
+    if (maybe_short && is8bit(offs - short_size)) {
       // 0111 tttn #8-bit disp
       emit_byte(0x70 | cc);
       emit_byte((offs - short_size) & 0xFF);
@@ -1399,7 +1398,7 @@
   emit_operand(rsp, adr);
 }
 
-void Assembler::jmp(Label& L, relocInfo::relocType rtype) {
+void Assembler::jmp(Label& L, bool maybe_short) {
   if (L.is_bound()) {
     address entry = target(L);
     assert(entry != NULL, "jmp most probably wrong");
@@ -1407,7 +1406,7 @@
     const int short_size = 2;
     const int long_size = 5;
     intptr_t offs = entry - _code_pos;
-    if (rtype == relocInfo::none && is8bit(offs - short_size)) {
+    if (maybe_short && is8bit(offs - short_size)) {
       emit_byte(0xEB);
       emit_byte((offs - short_size) & 0xFF);
     } else {
@@ -1420,7 +1419,6 @@
     // the forward jump will not run beyond 256 bytes, use jmpb to
     // force an 8-bit displacement.
     InstructionMark im(this);
-    relocate(rtype);
     L.add_patch_at(code(), locator());
     emit_byte(0xE9);
     emit_long(0);
@@ -2309,7 +2307,7 @@
 }
 
 void Assembler::prefetchnta(Address src) {
-  NOT_LP64(assert(VM_Version::supports_sse2(), "must support"));
+  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
   InstructionMark im(this);
   prefetch_prefix(src);
   emit_byte(0x18);
@@ -2317,7 +2315,7 @@
 }
 
 void Assembler::prefetchr(Address src) {
-  NOT_LP64(assert(VM_Version::supports_3dnow_prefetch(), "must support"));
+  assert(VM_Version::supports_3dnow_prefetch(), "must support");
   InstructionMark im(this);
   prefetch_prefix(src);
   emit_byte(0x0D);
@@ -2349,7 +2347,7 @@
 }
 
 void Assembler::prefetchw(Address src) {
-  NOT_LP64(assert(VM_Version::supports_3dnow_prefetch(), "must support"));
+  assert(VM_Version::supports_3dnow_prefetch(), "must support");
   InstructionMark im(this);
   prefetch_prefix(src);
   emit_byte(0x0D);
@@ -3674,7 +3672,7 @@
     } else {
       if (adr.index_needs_rex()) {
         prefix(REX_X);
-      } else if (reg->encoding() >= 4 ) {
+      } else if (byteinst && reg->encoding() >= 4 ) {
         prefix(REX);
       }
     }
@@ -8006,15 +8004,10 @@
                                                 Register temp_reg) {
   assert_different_registers(vmslots_reg, mh_reg, temp_reg);
   // load mh.type.form.vmslots
-  if (java_lang_invoke_MethodHandle::vmslots_offset_in_bytes() != 0) {
-    // hoist vmslots into every mh to avoid dependent load chain
-    movl(vmslots_reg, Address(mh_reg, delayed_value(java_lang_invoke_MethodHandle::vmslots_offset_in_bytes, temp_reg)));
-  } else {
-    Register temp2_reg = vmslots_reg;
-    load_heap_oop(temp2_reg, Address(mh_reg,    delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg)));
-    load_heap_oop(temp2_reg, Address(temp2_reg, delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, temp_reg)));
-    movl(vmslots_reg, Address(temp2_reg, delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)));
-  }
+  Register temp2_reg = vmslots_reg;
+  load_heap_oop(temp2_reg, Address(mh_reg,    delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg)));
+  load_heap_oop(temp2_reg, Address(temp2_reg, delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, temp_reg)));
+  movl(vmslots_reg, Address(temp2_reg, delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)));
 }
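
The jcc/jmp change above swaps the relocation-type parameter for a maybe_short flag: for a bound label the assembler now takes the 2-byte short form whenever the displacement, measured from the end of the instruction, fits in a signed byte. A standalone model of that decision for jcc; the near-form byte sequence is ordinary x86 encoding added for completeness, not copied from this patch:

    // Standalone illustration, not the Assembler class.
    #include <cstdint>
    #include <vector>

    static bool is8bit(intptr_t x) { return -128 <= x && x <= 127; }

    static void emit_jcc_bound(std::vector<uint8_t>& code, int cc,
                               intptr_t target_offset, bool maybe_short) {
      const intptr_t short_size = 2, long_size = 6;
      intptr_t offs = target_offset - static_cast<intptr_t>(code.size());  // label is bound
      if (maybe_short && is8bit(offs - short_size)) {
        code.push_back(static_cast<uint8_t>(0x70 | cc));    // 0111 tttn, 8-bit disp
        code.push_back(static_cast<uint8_t>(offs - short_size));
      } else {
        code.push_back(0x0F);                                // near form, 32-bit disp
        code.push_back(static_cast<uint8_t>(0x80 | cc));
        int32_t disp = static_cast<int32_t>(offs - long_size);
        for (int i = 0; i < 4; ++i) code.push_back(static_cast<uint8_t>(disp >> (8 * i)));
      }
    }

    int main() {
      std::vector<uint8_t> code(100);                // pretend 100 bytes already emitted
      emit_jcc_bound(code, 0x4 /* e/z */, 20, true); // backward jump, fits in 8 bits
      return code.size() == 102 ? 0 : 1;             // short form adds exactly 2 bytes
    }

For labels that are not yet bound, the short form still has to be requested explicitly with jccb/jmpb, which is what the interp_masm changes further down do for the tiny interp_only_mode skip.
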
 
 
--- a/src/cpu/x86/vm/assembler_x86.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/x86/vm/assembler_x86.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1065,8 +1065,7 @@
   // Note: The same Label can be used for forward and backward branches
   // but it may be bound only once.
 
-  void jcc(Condition cc, Label& L,
-           relocInfo::relocType rtype = relocInfo::none);
+  void jcc(Condition cc, Label& L, bool maybe_short = true);
 
   // Conditional jump to a 8-bit offset to L.
   // WARNING: be very careful using this for forward jumps.  If the label is
@@ -1077,7 +1076,7 @@
   void jmp(Address entry);    // pc <- entry
 
   // Label operations & relative jumps (PPUM Appendix D)
-  void jmp(Label& L, relocInfo::relocType rtype = relocInfo::none);   // unconditional jump to L
+  void jmp(Label& L, bool maybe_short = true);   // unconditional jump to L
 
   void jmp(Register entry); // pc <- entry
 
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -129,10 +129,6 @@
   return FrameMap::receiver_opr;
 }
 
-LIR_Opr LIR_Assembler::incomingReceiverOpr() {
-  return receiverOpr();
-}
-
 LIR_Opr LIR_Assembler::osrBufferPointer() {
   return FrameMap::as_pointer_opr(receiverOpr()->as_register());
 }
@@ -371,55 +367,6 @@
 }
 
 
-void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register new_hdr, int monitor_no, Register exception) {
-  if (exception->is_valid()) {
-    // preserve exception
-    // note: the monitor_exit runtime call is a leaf routine
-    //       and cannot block => no GC can happen
-    // The slow case (MonitorAccessStub) uses the first two stack slots
-    // ([esp+0] and [esp+4]), therefore we store the exception at [esp+8]
-    __ movptr (Address(rsp, 2*wordSize), exception);
-  }
-
-  Register obj_reg  = obj_opr->as_register();
-  Register lock_reg = lock_opr->as_register();
-
-  // setup registers (lock_reg must be rax, for lock_object)
-  assert(obj_reg != SYNC_header && lock_reg != SYNC_header, "rax, must be available here");
-  Register hdr = lock_reg;
-  assert(new_hdr == SYNC_header, "wrong register");
-  lock_reg = new_hdr;
-  // compute pointer to BasicLock
-  Address lock_addr = frame_map()->address_for_monitor_lock(monitor_no);
-  __ lea(lock_reg, lock_addr);
-  // unlock object
-  MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, true, monitor_no);
-  // _slow_case_stubs->append(slow_case);
-  // temporary fix: must be created after exceptionhandler, therefore as call stub
-  _slow_case_stubs->append(slow_case);
-  if (UseFastLocking) {
-    // try inlined fast unlocking first, revert to slow locking if it fails
-    // note: lock_reg points to the displaced header since the displaced header offset is 0!
-    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
-    __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry());
-  } else {
-    // always do slow unlocking
-    // note: the slow unlocking code could be inlined here, however if we use
-    //       slow unlocking, speed doesn't matter anyway and this solution is
-    //       simpler and requires less duplicated code - additionally, the
-    //       slow unlocking code is the same in either case which simplifies
-    //       debugging
-    __ jmp(*slow_case->entry());
-  }
-  // done
-  __ bind(*slow_case->continuation());
-
-  if (exception->is_valid()) {
-    // restore exception
-    __ movptr (exception, Address(rsp, 2 * wordSize));
-  }
-}
-
 // This specifies the rsp decrement needed to build the frame
 int LIR_Assembler::initial_frame_size_in_bytes() {
   // if rounding, must let FrameMap know!
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -29,8 +29,6 @@
 
   Address::ScaleFactor array_element_size(BasicType type) const;
 
-  void monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register new_hdr, int monitor_no, Register exception);
-
   void arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack);
 
   // helper functions which checks for overflow and sets bailout if it
--- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1465,19 +1465,6 @@
       }
       break;
 
-    case jvmti_exception_throw_id:
-      { // rax,: exception oop
-        StubFrame f(sasm, "jvmti_exception_throw", dont_gc_arguments);
-        // Preserve all registers across this potentially blocking call
-        const int num_rt_args = 2;  // thread, exception oop
-        OopMap* map = save_live_registers(sasm, num_rt_args);
-        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, Runtime1::post_jvmti_exception_throw), rax);
-        oop_maps = new OopMapSet();
-        oop_maps->add_gc_map(call_offset, map);
-        restore_live_registers(sasm);
-      }
-      break;
-
     case dtrace_object_alloc_id:
       { // rax,: object
         StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
--- a/src/cpu/x86/vm/frame_x86.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/x86/vm/frame_x86.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -666,3 +666,9 @@
 
 }
 #endif
+
+intptr_t *frame::initial_deoptimization_info() {
+  // used to reset the saved FP
+  return fp();
+}
+
--- a/src/cpu/x86/vm/icache_x86.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/x86/vm/icache_x86.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -43,8 +43,8 @@
 #ifdef AMD64
   enum {
     stub_size      = 64, // Size of the icache flush stub in bytes
-    line_size      = 32, // Icache line size in bytes
-    log2_line_size = 5   // log2(line_size)
+    line_size      = 64, // Icache line size in bytes
+    log2_line_size = 6   // log2(line_size)
   };
 
   // Use default implementation
--- a/src/cpu/x86/vm/interp_masm_x86_32.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -233,7 +233,7 @@
 
 void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register index,
                                                            int bcp_offset, size_t index_size) {
-  assert(cache != index, "must use different registers");
+  assert_different_registers(cache, index);
   get_cache_index_at_bcp(index, bcp_offset, index_size);
   movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
   assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
@@ -241,6 +241,20 @@
 }
 
 
+void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
+                                                                        Register index,
+                                                                        Register bytecode,
+                                                                        int byte_no,
+                                                                        int bcp_offset,
+                                                                        size_t index_size) {
+  get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
+  movptr(bytecode, Address(cache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
+  const int shift_count = (1 + byte_no) * BitsPerByte;
+  shrptr(bytecode, shift_count);
+  andptr(bytecode, 0xFF);
+}
+
+
 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
                                                                int bcp_offset, size_t index_size) {
   assert(cache != tmp, "must use different register");
@@ -403,9 +417,9 @@
     // interp_only_mode if these events CAN be enabled.
     get_thread(temp);
     // interp_only is an int, on little endian it is sufficient to test the byte only
-    // Is a cmpl faster (ce
+    // Is a cmpl faster?
     cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0);
-    jcc(Assembler::zero, run_compiled_code);
+    jccb(Assembler::zero, run_compiled_code);
     jmp(Address(method, methodOopDesc::interpreter_entry_offset()));
     bind(run_compiled_code);
   }
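
get_cache_and_index_and_bytecode_at_bcp above packages the open-coded load, shift and mask that resolve_cache_and_index used to do (the SPARC hunk earlier shows the same replacement): the cache entry's indices word keeps the two rewritten bytecodes in the bytes above the low 16-bit constant pool index, so bytecode number byte_no is one shift and one mask away. A self-contained sketch of just that extraction; the sample word is hypothetical:

    // Sketch only, not interpreter code.
    #include <cstdint>

    static const int BitsPerByte = 8;

    // byte_no selects which of the two bytecode slots to read (1 or 2).
    static uint32_t bytecode_from_indices(uint32_t indices, int byte_no) {
      const int shift_count = (1 + byte_no) * BitsPerByte;
      return (indices >> shift_count) & 0xFF;
    }

    int main() {
      uint32_t indices = 0x00B40012u;  // hypothetical entry: CP index 0x12, bytecode slot 1 = 0xB4
      // 0xB4 is the getfield opcode, so the "have we resolved this bytecode?" compare succeeds.
      return bytecode_from_indices(indices, 1) == 0xB4 ? 0 : 1;
    }
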
--- a/src/cpu/x86/vm/interp_masm_x86_32.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/x86/vm/interp_masm_x86_32.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -83,6 +83,7 @@
   }
   void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
   void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, size_t index_size = sizeof(u2));
+  void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2));
   void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
   void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
 
--- a/src/cpu/x86/vm/interp_masm_x86_64.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -233,7 +233,7 @@
                                                            Register index,
                                                            int bcp_offset,
                                                            size_t index_size) {
-  assert(cache != index, "must use different registers");
+  assert_different_registers(cache, index);
   get_cache_index_at_bcp(index, bcp_offset, index_size);
   movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
   assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
@@ -242,6 +242,22 @@
 }
 
 
+void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
+                                                                        Register index,
+                                                                        Register bytecode,
+                                                                        int byte_no,
+                                                                        int bcp_offset,
+                                                                        size_t index_size) {
+  get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
+  // We use a 32-bit load here since the layout of 64-bit words on
+  // little-endian machines allows this.
+  movl(bytecode, Address(cache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
+  const int shift_count = (1 + byte_no) * BitsPerByte;
+  shrl(bytecode, shift_count);
+  andl(bytecode, 0xFF);
+}
+
+
 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
                                                                Register tmp,
                                                                int bcp_offset,
@@ -402,7 +418,7 @@
     // interp_only is an int, on little endian it is sufficient to test the byte only
     // Is a cmpl faster?
     cmpb(Address(r15_thread, JavaThread::interp_only_mode_offset()), 0);
-    jcc(Assembler::zero, run_compiled_code);
+    jccb(Assembler::zero, run_compiled_code);
     jmp(Address(method, methodOopDesc::interpreter_entry_offset()));
     bind(run_compiled_code);
   }
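
The 64-bit variant above also notes that a 32-bit movl of the indices field is enough on little-endian machines: the first four bytes at that address are exactly the low 32 bits of the 64-bit word, which is all the shift-and-mask extraction reads. A small check of that byte-layout claim; the sample value is made up and the test assumes a little-endian target:

    // Illustration only, not VM code.
    #include <cstdint>
    #include <cstring>

    int main() {
      uint64_t word = 0x1122334400B40012ull;   // hypothetical 64-bit cache-entry word
      uint32_t low;
      std::memcpy(&low, &word, sizeof(low));   // what a 32-bit load of the same address sees
      return low == 0x00B40012u ? 0 : 1;       // holds on little-endian machines
    }
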
--- a/src/cpu/x86/vm/interp_masm_x86_64.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/x86/vm/interp_masm_x86_64.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -100,13 +100,11 @@
   }
 
   void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
-  void get_cache_and_index_at_bcp(Register cache, Register index,
-                                  int bcp_offset, size_t index_size = sizeof(u2));
-  void get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
-                                      int bcp_offset, size_t index_size = sizeof(u2));
+  void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, size_t index_size = sizeof(u2));
+  void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2));
+  void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
   void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
 
-
   void pop_ptr(Register r = rax);
   void pop_i(Register r = rax);
   void pop_l(Register r = rax);
--- a/src/cpu/x86/vm/methodHandles_x86.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/x86/vm/methodHandles_x86.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -546,6 +546,28 @@
 }
 #endif //ASSERT
 
+void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp) {
+  if (JvmtiExport::can_post_interpreter_events()) {
+    Label run_compiled_code;
+    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
+    // compiled code in threads for which the event is enabled.  Check here for
+    // interp_only_mode if these events CAN be enabled.
+#ifdef _LP64
+    Register rthread = r15_thread;
+#else
+    Register rthread = temp;
+    __ get_thread(rthread);
+#endif
+    // interp_only is an int, on little endian it is sufficient to test the byte only
+    // Is a cmpl faster?
+    __ cmpb(Address(rthread, JavaThread::interp_only_mode_offset()), 0);
+    __ jccb(Assembler::zero, run_compiled_code);
+    __ jmp(Address(method, methodOopDesc::interpreter_entry_offset()));
+    __ bind(run_compiled_code);
+  }
+  __ jmp(Address(method, methodOopDesc::from_interpreted_offset()));
+}
+
 // Code generation
 address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
   // rbx: methodOop
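The new jump_from_method_handle above encodes a dispatch decision in assembly; the following standalone sketch restates it in plain C++ under hypothetical names (FakeThread, FakeMethod and select_entry are not HotSpot APIs): when JVMTI interpreter events may be posted and the thread is in interp_only_mode, control must go to the interpreter entry so that events such as single stepping still fire; otherwise the normal from_interpreted entry is used.

#include <cstdint>

struct FakeThread { int32_t interp_only_mode; };            // stand-in for JavaThread's flag
struct FakeMethod {
  const void* interpreter_entry;                             // interpreter_entry_offset() analogue
  const void* from_interpreted;                              // from_interpreted_offset() analogue
};

static const void* select_entry(const FakeThread& t, const FakeMethod& m,
                                bool can_post_interpreter_events) {
  // The assembly only tests the low byte (cmpb); testing the whole int is
  // equivalent here because the flag is either zero or a small positive value.
  if (can_post_interpreter_events && t.interp_only_mode != 0) {
    return m.interpreter_entry;   // force interpreted execution so JVMTI events fire
  }
  return m.from_interpreted;      // normal path: adapter / compiled code
}

int main() {
  static char interp_stub, compiled_stub;                    // dummy entry points
  const FakeMethod m{&interp_stub, &compiled_stub};
  const FakeThread stepping{1}, running{0};
  return (select_entry(stepping, m, true) == m.interpreter_entry &&
          select_entry(running,  m, true) == m.from_interpreted) ? 0 : 1;
}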
@@ -602,6 +624,11 @@
 
   // error path for invokeExact (only)
   __ bind(invoke_exact_error_path);
+  // Ensure that the top of the stack is properly aligned.
+  __ mov(rdi, rsp);
+  __ andptr(rsp, -StackAlignmentInBytes); // Align the stack for the ABI
+  __ pushptr(Address(rdi, 0));  // Pick up the return address
+
   // Stub wants expected type in rax and the actual type in rcx
   __ jump(ExternalAddress(StubRoutines::throw_WrongMethodTypeException_entry()));
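The three instructions added above realign the stack before tail-jumping to the WrongMethodTypeException stub: rsp is rounded down with an AND against -StackAlignmentInBytes and the caller's return address is re-pushed from the saved old rsp (held in rdi). A tiny standalone sketch of the rounding arithmetic, assuming a 16-byte ABI alignment for illustration:

#include <cassert>
#include <cstdint>

static uintptr_t align_down(uintptr_t sp, uintptr_t alignment) {
  // For a power-of-two alignment, sp & ~(alignment - 1) uses the same mask as
  // sp & (uintptr_t)-(intptr_t)alignment, which is what andptr(rsp, -N) computes.
  return sp & ~(alignment - 1);
}

int main() {
  const uintptr_t kStackAlignment = 16;                            // assumed ABI stack alignment
  assert(align_down(0x7ffc1237u, kStackAlignment) == 0x7ffc1230u);
  assert(align_down(0x7ffc1230u, kStackAlignment) == 0x7ffc1230u); // already aligned: unchanged
  return 0;
}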
 
@@ -1120,9 +1147,6 @@
   guarantee(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");
 
   // some handy addresses
-  Address rbx_method_fie(     rbx,      methodOopDesc::from_interpreted_offset() );
-  Address rbx_method_fce(     rbx,      methodOopDesc::from_compiled_offset() );
-
   Address rcx_mh_vmtarget(    rcx_recv, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes() );
   Address rcx_dmh_vmindex(    rcx_recv, java_lang_invoke_DirectMethodHandle::vmindex_offset_in_bytes() );
 
@@ -1163,8 +1187,8 @@
       assert(raise_exception_method(), "must be set");
       assert(raise_exception_method()->from_compiled_entry(), "method must be linked");
 
-      const Register rdi_pc = rax;
-      __ pop(rdi_pc);  // caller PC
+      const Register rax_pc = rax;
+      __ pop(rax_pc);  // caller PC
       __ mov(rsp, saved_last_sp);  // cut the stack back to where the caller started
 
       Register rbx_method = rbx_temp;
@@ -1172,11 +1196,15 @@
 
       const int jobject_oop_offset = 0;
       __ movptr(rbx_method, Address(rbx_method, jobject_oop_offset));  // dereference the jobject
-      __ verify_oop(rbx_method);
+
+      __ movptr(saved_last_sp, rsp);
+      __ subptr(rsp, 3 * wordSize);
+      __ push(rax_pc);         // restore caller PC
 
-      NOT_LP64(__ push(rarg2_required));
-      __ push(rdi_pc);         // restore caller PC
-      __ jmp(rbx_method_fce);  // jump to compiled entry
+      __ movl  (__ argument_address(constant(2)), rarg0_code);
+      __ movptr(__ argument_address(constant(1)), rarg1_actual);
+      __ movptr(__ argument_address(constant(0)), rarg2_required);
+      jump_from_method_handle(_masm, rbx_method, rax);
     }
     break;
 
@@ -1195,7 +1223,7 @@
         __ null_check(rcx_recv);
         __ verify_oop(rcx_recv);
       }
-      __ jmp(rbx_method_fie);
+      jump_from_method_handle(_masm, rbx_method, rax);
     }
     break;
 
@@ -1228,7 +1256,7 @@
       __ movptr(rbx_method, vtable_entry_addr);
 
       __ verify_oop(rbx_method);
-      __ jmp(rbx_method_fie);
+      jump_from_method_handle(_masm, rbx_method, rax);
     }
     break;
 
@@ -1263,7 +1291,7 @@
                                  no_such_interface);
 
       __ verify_oop(rbx_method);
-      __ jmp(rbx_method_fie);
+      jump_from_method_handle(_masm, rbx_method, rax);
       __ hlt();
 
       __ bind(no_such_interface);
@@ -1311,7 +1339,7 @@
         Register rbx_method = rbx_temp;
         __ load_heap_oop(rbx_method, rcx_mh_vmtarget);
         __ verify_oop(rbx_method);
-        __ jmp(rbx_method_fie);
+        jump_from_method_handle(_masm, rbx_method, rax);
       } else {
         __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
         __ verify_oop(rcx_recv);
@@ -1320,6 +1348,13 @@
     }
     break;
 
+  case _adapter_opt_profiling:
+    if (java_lang_invoke_CountingMethodHandle::vmcount_offset_in_bytes() != 0) {
+      Address rcx_mh_vmcount(rcx_recv, java_lang_invoke_CountingMethodHandle::vmcount_offset_in_bytes());
+      __ incrementl(rcx_mh_vmcount);
+    }
+    // fall through
+
   case _adapter_retype_only:
   case _adapter_retype_raw:
     // immediately jump to the next MH layer:
--- a/src/cpu/x86/vm/methodHandles_x86.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/x86/vm/methodHandles_x86.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -110,6 +110,7 @@
 
 class RicochetFrame {
   friend class MethodHandles;
+  friend class VMStructs;
 
  private:
   intptr_t* _continuation;          // what to do when control gets back here
@@ -291,6 +292,10 @@
                  "reference is a MH");
   }
 
+  // Similar to InterpreterMacroAssembler::jump_from_interpreted.
+  // Takes care of special dispatch from single stepping too.
+  static void jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp);
+
   static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
 
   static Register saved_last_sp_register() {
--- a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -2471,7 +2471,7 @@
   __ movl(counter, rbx);
 
   // Pick up the initial fp we should save
-  __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_fp_offset_in_bytes()));
+  __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
 
   // Now adjust the caller's stack to make up for the extra locals
   // but record the original sp so that we can save it in the skeletal interpreter
@@ -2691,7 +2691,7 @@
   __ movl(counter, rbx);
 
   // Pick up the initial fp we should save
-  __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_fp_offset_in_bytes()));
+  __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
 
   // Now adjust the caller's stack to make up for the extra locals
   // but record the original sp so that we can save it in the skeletal interpreter
--- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -2730,7 +2730,7 @@
   __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
 
   // Pick up the initial fp we should save
-  __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_fp_offset_in_bytes()));
+  __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
 
   // Now adjust the caller's stack to make up for the extra locals
   // but record the original sp so that we can save it in the skeletal interpreter
@@ -2922,7 +2922,7 @@
   // Pick up the initial fp we should save
   __ movptr(rbp,
             Address(rdi,
-                    Deoptimization::UnrollBlock::initial_fp_offset_in_bytes()));
+                    Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
 
   // Now adjust the caller's stack to make up for the extra locals but
   // record the original sp so that we can save it in the skeletal
--- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -2187,7 +2187,7 @@
   // either at call sites or otherwise assume that stack unwinding will be initiated,
   // so caller saved registers were assumed volatile in the compiler.
   address generate_throw_exception(const char* name, address runtime_entry,
-                                   bool restore_saved_exception_pc, Register arg1 = noreg, Register arg2 = noreg) {
+                                   Register arg1 = noreg, Register arg2 = noreg) {
 
     int insts_size = 256;
     int locs_size  = 32;
@@ -2204,10 +2204,6 @@
     // differently than the real call_VM
     Register java_thread = rbx;
     __ get_thread(java_thread);
-    if (restore_saved_exception_pc) {
-      __ movptr(rax, Address(java_thread, in_bytes(JavaThread::saved_exception_pc_offset())));
-      __ push(rax);
-    }
 
     __ enter(); // required for proper stackwalking of RuntimeStub frame
 
@@ -2323,7 +2319,7 @@
     StubRoutines::_throw_WrongMethodTypeException_entry =
       generate_throw_exception("WrongMethodTypeException throw_exception",
                                CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException),
-                               false, rax, rcx);
+                               rax, rcx);
   }
 
 
@@ -2332,12 +2328,10 @@
 
     // These entry points require SharedInfo::stack0 to be set up in non-core builds
     // and need to be relocatable, so they each fabricate a RuntimeStub internally.
-    StubRoutines::_throw_AbstractMethodError_entry         = generate_throw_exception("AbstractMethodError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError),  false);
-    StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError),  false);
-    StubRoutines::_throw_ArithmeticException_entry         = generate_throw_exception("ArithmeticException throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_ArithmeticException),  true);
-    StubRoutines::_throw_NullPointerException_entry        = generate_throw_exception("NullPointerException throw_exception",         CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException), true);
-    StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
-    StubRoutines::_throw_StackOverflowError_entry          = generate_throw_exception("StackOverflowError throw_exception",           CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError),   false);
+    StubRoutines::_throw_AbstractMethodError_entry         = generate_throw_exception("AbstractMethodError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError));
+    StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));
+    StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));
+    StubRoutines::_throw_StackOverflowError_entry          = generate_throw_exception("StackOverflowError throw_exception",           CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
 
     //------------------------------------------------------------------------------------------------------------------------
     // entry points that are platform specific
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -935,6 +935,8 @@
     __ pusha();                       // push registers
     Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);
 
+    // FIXME: this probably needs alignment logic
+
     __ subptr(rsp, frame::arg_reg_save_area_bytes);
     BLOCK_COMMENT("call handle_unsafe_access");
     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access)));
@@ -2934,7 +2936,6 @@
   // caller saved registers were assumed volatile in the compiler.
   address generate_throw_exception(const char* name,
                                    address runtime_entry,
-                                   bool restore_saved_exception_pc,
                                    Register arg1 = noreg,
                                    Register arg2 = noreg) {
     // Information about frame layout at time of blocking runtime call.
@@ -2962,12 +2963,6 @@
     // which has the ability to fetch the return PC out of
     // thread-local storage and also sets up last_Java_sp slightly
     // differently than the real call_VM
-    if (restore_saved_exception_pc) {
-      __ movptr(rax,
-                Address(r15_thread,
-                        in_bytes(JavaThread::saved_exception_pc_offset())));
-      __ push(rax);
-    }
 
     __ enter(); // required for proper stackwalking of RuntimeStub frame
 
@@ -3068,7 +3063,7 @@
     StubRoutines::_throw_WrongMethodTypeException_entry =
       generate_throw_exception("WrongMethodTypeException throw_exception",
                                CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException),
-                               false, rax, rcx);
+                               rax, rcx);
   }
 
   void generate_all() {
@@ -3081,43 +3076,25 @@
       generate_throw_exception("AbstractMethodError throw_exception",
                                CAST_FROM_FN_PTR(address,
                                                 SharedRuntime::
-                                                throw_AbstractMethodError),
-                               false);
+                                                throw_AbstractMethodError));
 
     StubRoutines::_throw_IncompatibleClassChangeError_entry =
       generate_throw_exception("IncompatibleClassChangeError throw_exception",
                                CAST_FROM_FN_PTR(address,
                                                 SharedRuntime::
-                                                throw_IncompatibleClassChangeError),
-                               false);
-
-    StubRoutines::_throw_ArithmeticException_entry =
-      generate_throw_exception("ArithmeticException throw_exception",
-                               CAST_FROM_FN_PTR(address,
-                                                SharedRuntime::
-                                                throw_ArithmeticException),
-                               true);
-
-    StubRoutines::_throw_NullPointerException_entry =
-      generate_throw_exception("NullPointerException throw_exception",
-                               CAST_FROM_FN_PTR(address,
-                                                SharedRuntime::
-                                                throw_NullPointerException),
-                               true);
+                                                throw_IncompatibleClassChangeError));
 
     StubRoutines::_throw_NullPointerException_at_call_entry =
       generate_throw_exception("NullPointerException at call throw_exception",
                                CAST_FROM_FN_PTR(address,
                                                 SharedRuntime::
-                                                throw_NullPointerException_at_call),
-                               false);
+                                                throw_NullPointerException_at_call));
 
     StubRoutines::_throw_StackOverflowError_entry =
       generate_throw_exception("StackOverflowError throw_exception",
                                CAST_FROM_FN_PTR(address,
                                                 SharedRuntime::
-                                                throw_StackOverflowError),
-                               false);
+                                                throw_StackOverflowError));
 
     // entry points that are platform specific
     StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
--- a/src/cpu/x86/vm/templateTable_x86_32.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/x86/vm/templateTable_x86_32.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -202,45 +202,74 @@
 }
 
 
-void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc,
-                                   Register scratch,
-                                   bool load_bc_into_scratch/*=true*/) {
-
-  if (!RewriteBytecodes) return;
-  // the pair bytecodes have already done the load.
-  if (load_bc_into_scratch) {
-    __ movl(bc, bytecode);
+void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
+                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
+                                   int byte_no) {
+  if (!RewriteBytecodes)  return;
+  Label L_patch_done;
+
+  switch (bc) {
+  case Bytecodes::_fast_aputfield:
+  case Bytecodes::_fast_bputfield:
+  case Bytecodes::_fast_cputfield:
+  case Bytecodes::_fast_dputfield:
+  case Bytecodes::_fast_fputfield:
+  case Bytecodes::_fast_iputfield:
+  case Bytecodes::_fast_lputfield:
+  case Bytecodes::_fast_sputfield:
+    {
+      // We skip bytecode quickening for putfield instructions when
+      // the put_code written to the constant pool cache is zero.
+      // This is required so that every execution of this instruction
+      // calls out to InterpreterRuntime::resolve_get_put to do
+      // additional, required work.
+      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
+      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
+      __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
+      __ movl(bc_reg, bc);
+      __ cmpl(temp_reg, (int) 0);
+      __ jcc(Assembler::zero, L_patch_done);  // don't patch
+    }
+    break;
+  default:
+    assert(byte_no == -1, "sanity");
+    // the pair bytecodes have already done the load.
+    if (load_bc_into_bc_reg) {
+      __ movl(bc_reg, bc);
+    }
   }
-  Label patch_done;
+
   if (JvmtiExport::can_post_breakpoint()) {
-    Label fast_patch;
+    Label L_fast_patch;
     // if a breakpoint is present we can't rewrite the stream directly
-    __ movzbl(scratch, at_bcp(0));
-    __ cmpl(scratch, Bytecodes::_breakpoint);
-    __ jcc(Assembler::notEqual, fast_patch);
-    __ get_method(scratch);
+    __ movzbl(temp_reg, at_bcp(0));
+    __ cmpl(temp_reg, Bytecodes::_breakpoint);
+    __ jcc(Assembler::notEqual, L_fast_patch);
+    __ get_method(temp_reg);
     // Let breakpoint table handling rewrite to quicker bytecode
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, rsi, bc);
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, rsi, bc_reg);
 #ifndef ASSERT
-    __ jmpb(patch_done);
+    __ jmpb(L_patch_done);
 #else
-    __ jmp(patch_done);
+    __ jmp(L_patch_done);
 #endif
-    __ bind(fast_patch);
+    __ bind(L_fast_patch);
   }
+
 #ifdef ASSERT
-  Label okay;
-  __ load_unsigned_byte(scratch, at_bcp(0));
-  __ cmpl(scratch, (int)Bytecodes::java_code(bytecode));
-  __ jccb(Assembler::equal, okay);
-  __ cmpl(scratch, bc);
-  __ jcc(Assembler::equal, okay);
+  Label L_okay;
+  __ load_unsigned_byte(temp_reg, at_bcp(0));
+  __ cmpl(temp_reg, (int)Bytecodes::java_code(bc));
+  __ jccb(Assembler::equal, L_okay);
+  __ cmpl(temp_reg, bc_reg);
+  __ jcc(Assembler::equal, L_okay);
   __ stop("patching the wrong bytecode");
-  __ bind(okay);
+  __ bind(L_okay);
 #endif
+
   // patch bytecode
-  __ movb(at_bcp(0), bc);
-  __ bind(patch_done);
+  __ movb(at_bcp(0), bc_reg);
+  __ bind(L_patch_done);
 }
 
 //----------------------------------------------------------------------------------------------------
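The new guard at the top of patch_bytecode can be restated as a small predicate: for the _fast_*putfield rewrites the put_code cached in the constant pool cache entry is loaded first, and if it is still zero the bytecode is left unpatched so the next execution calls back into InterpreterRuntime::resolve_get_put. A standalone sketch with hypothetical names; the byte_no value and word layout below are illustrative only.

#include <cstdint>

// Returns true only when the cached code byte for this byte_no is non-zero,
// i.e. the entry has been fully resolved and quickening is safe.
static bool should_quicken_putfield(uint32_t cp_cache_indices, int byte_no) {
  const uint8_t cached_code =
      static_cast<uint8_t>((cp_cache_indices >> ((1 + byte_no) * 8)) & 0xFF);
  return cached_code != 0;        // zero means: don't patch, keep the slow path
}

int main() {
  const int byte_no = 2;          // illustrative value for the put_code byte
  return (!should_quicken_putfield(0x00000000u, byte_no) &&   // unresolved entry: don't patch
           should_quicken_putfield(0xB5000000u, byte_no)) ? 0 : 1;
}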
@@ -2060,24 +2089,20 @@
   assert_different_registers(result, Rcache, index, temp);
 
   Label resolved;
-  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
   if (byte_no == f1_oop) {
     // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
     // This kind of CP cache entry does not need to match the flags byte, because
     // there is a 1-1 relation between bytecode type and CP entry type.
     assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
+    __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
     __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
     __ testptr(result, result);
     __ jcc(Assembler::notEqual, resolved);
   } else {
     assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
     assert(result == noreg, "");  //else change code for setting result
-    const int shift_count = (1 + byte_no)*BitsPerByte;
-    __ movl(temp, Address(Rcache, index, Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
-    __ shrl(temp, shift_count);
-    // have we resolved this bytecode?
-    __ andl(temp, 0xFF);
-    __ cmpl(temp, (int)bytecode());
+    __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
+    __ cmpl(temp, (int) bytecode());  // have we resolved this bytecode?
     __ jcc(Assembler::equal, resolved);
   }
 
@@ -2453,138 +2478,153 @@
 
   __ shrl(flags, ConstantPoolCacheEntry::tosBits);
   assert(btos == 0, "change code, btos != 0");
-  // btos
   __ andl(flags, 0x0f);
   __ jcc(Assembler::notZero, notByte);
 
-  __ pop(btos);
-  if (!is_static) pop_and_check_object(obj);
-  __ movb(lo, rax );
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_bputfield, rcx, rbx);
+  // btos
+  {
+    __ pop(btos);
+    if (!is_static) pop_and_check_object(obj);
+    __ movb(lo, rax);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_bputfield, rcx, rbx, true, byte_no);
+    }
+    __ jmp(Done);
   }
-  __ jmp(Done);
 
   __ bind(notByte);
-  // itos
-  __ cmpl(flags, itos );
+  __ cmpl(flags, itos);
   __ jcc(Assembler::notEqual, notInt);
 
-  __ pop(itos);
-  if (!is_static) pop_and_check_object(obj);
-
-  __ movl(lo, rax );
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_iputfield, rcx, rbx);
+  // itos
+  {
+    __ pop(itos);
+    if (!is_static) pop_and_check_object(obj);
+    __ movl(lo, rax);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_iputfield, rcx, rbx, true, byte_no);
+    }
+    __ jmp(Done);
   }
-  __ jmp(Done);
 
   __ bind(notInt);
-  // atos
-  __ cmpl(flags, atos );
+  __ cmpl(flags, atos);
   __ jcc(Assembler::notEqual, notObj);
 
-  __ pop(atos);
-  if (!is_static) pop_and_check_object(obj);
-
-  do_oop_store(_masm, lo, rax, _bs->kind(), false);
-
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_aputfield, rcx, rbx);
+  // atos
+  {
+    __ pop(atos);
+    if (!is_static) pop_and_check_object(obj);
+    do_oop_store(_masm, lo, rax, _bs->kind(), false);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_aputfield, rcx, rbx, true, byte_no);
+    }
+    __ jmp(Done);
   }
 
-  __ jmp(Done);
-
   __ bind(notObj);
-  // ctos
-  __ cmpl(flags, ctos );
+  __ cmpl(flags, ctos);
   __ jcc(Assembler::notEqual, notChar);
 
-  __ pop(ctos);
-  if (!is_static) pop_and_check_object(obj);
-  __ movw(lo, rax );
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_cputfield, rcx, rbx);
+  // ctos
+  {
+    __ pop(ctos);
+    if (!is_static) pop_and_check_object(obj);
+    __ movw(lo, rax);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_cputfield, rcx, rbx, true, byte_no);
+    }
+    __ jmp(Done);
   }
-  __ jmp(Done);
 
   __ bind(notChar);
-  // stos
-  __ cmpl(flags, stos );
+  __ cmpl(flags, stos);
   __ jcc(Assembler::notEqual, notShort);
 
-  __ pop(stos);
-  if (!is_static) pop_and_check_object(obj);
-  __ movw(lo, rax );
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_sputfield, rcx, rbx);
+  // stos
+  {
+    __ pop(stos);
+    if (!is_static) pop_and_check_object(obj);
+    __ movw(lo, rax);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_sputfield, rcx, rbx, true, byte_no);
+    }
+    __ jmp(Done);
   }
-  __ jmp(Done);
 
   __ bind(notShort);
-  // ltos
-  __ cmpl(flags, ltos );
+  __ cmpl(flags, ltos);
   __ jcc(Assembler::notEqual, notLong);
 
-  Label notVolatileLong;
-  __ testl(rdx, rdx);
-  __ jcc(Assembler::zero, notVolatileLong);
-
-  __ pop(ltos);  // overwrites rdx, do this after testing volatile.
-  if (!is_static) pop_and_check_object(obj);
-
-  // Replace with real volatile test
-  __ push(rdx);
-  __ push(rax);                 // Must update atomically with FIST
-  __ fild_d(Address(rsp,0));    // So load into FPU register
-  __ fistp_d(lo);               // and put into memory atomically
-  __ addptr(rsp, 2*wordSize);
-  // volatile_barrier();
-  volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
-                                               Assembler::StoreStore));
-  // Don't rewrite volatile version
-  __ jmp(notVolatile);
-
-  __ bind(notVolatileLong);
-
-  __ pop(ltos);  // overwrites rdx
-  if (!is_static) pop_and_check_object(obj);
-  NOT_LP64(__ movptr(hi, rdx));
-  __ movptr(lo, rax);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_lputfield, rcx, rbx);
+  // ltos
+  {
+    Label notVolatileLong;
+    __ testl(rdx, rdx);
+    __ jcc(Assembler::zero, notVolatileLong);
+
+    __ pop(ltos);  // overwrites rdx, do this after testing volatile.
+    if (!is_static) pop_and_check_object(obj);
+
+    // Replace with real volatile test
+    __ push(rdx);
+    __ push(rax);                 // Must update atomically with FIST
+    __ fild_d(Address(rsp,0));    // So load into FPU register
+    __ fistp_d(lo);               // and put into memory atomically
+    __ addptr(rsp, 2*wordSize);
+    // volatile_barrier();
+    volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
+                                                 Assembler::StoreStore));
+    // Don't rewrite volatile version
+    __ jmp(notVolatile);
+
+    __ bind(notVolatileLong);
+
+    __ pop(ltos);  // overwrites rdx
+    if (!is_static) pop_and_check_object(obj);
+    NOT_LP64(__ movptr(hi, rdx));
+    __ movptr(lo, rax);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_lputfield, rcx, rbx, true, byte_no);
+    }
+    __ jmp(notVolatile);
   }
-  __ jmp(notVolatile);
 
   __ bind(notLong);
-  // ftos
-  __ cmpl(flags, ftos );
+  __ cmpl(flags, ftos);
   __ jcc(Assembler::notEqual, notFloat);
 
-  __ pop(ftos);
-  if (!is_static) pop_and_check_object(obj);
-  __ fstp_s(lo);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_fputfield, rcx, rbx);
+  // ftos
+  {
+    __ pop(ftos);
+    if (!is_static) pop_and_check_object(obj);
+    __ fstp_s(lo);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_fputfield, rcx, rbx, true, byte_no);
+    }
+    __ jmp(Done);
   }
-  __ jmp(Done);
 
   __ bind(notFloat);
-  // dtos
-  __ cmpl(flags, dtos );
+#ifdef ASSERT
+  __ cmpl(flags, dtos);
   __ jcc(Assembler::notEqual, notDouble);
-
-  __ pop(dtos);
-  if (!is_static) pop_and_check_object(obj);
-  __ fstp_d(lo);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_dputfield, rcx, rbx);
+#endif
+
+  // dtos
+  {
+    __ pop(dtos);
+    if (!is_static) pop_and_check_object(obj);
+    __ fstp_d(lo);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_dputfield, rcx, rbx, true, byte_no);
+    }
+    __ jmp(Done);
   }
-  __ jmp(Done);
-
+
+#ifdef ASSERT
   __ bind(notDouble);
-
   __ stop("Bad state");
+#endif
 
   __ bind(Done);
 
--- a/src/cpu/x86/vm/templateTable_x86_64.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/x86/vm/templateTable_x86_64.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -203,46 +203,74 @@
   return Address(r13, offset);
 }
 
-void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc,
-                                   Register scratch,
-                                   bool load_bc_into_scratch/*=true*/) {
-  if (!RewriteBytecodes) {
-    return;
+void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
+                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
+                                   int byte_no) {
+  if (!RewriteBytecodes)  return;
+  Label L_patch_done;
+
+  switch (bc) {
+  case Bytecodes::_fast_aputfield:
+  case Bytecodes::_fast_bputfield:
+  case Bytecodes::_fast_cputfield:
+  case Bytecodes::_fast_dputfield:
+  case Bytecodes::_fast_fputfield:
+  case Bytecodes::_fast_iputfield:
+  case Bytecodes::_fast_lputfield:
+  case Bytecodes::_fast_sputfield:
+    {
+      // We skip bytecode quickening for putfield instructions when
+      // the put_code written to the constant pool cache is zero.
+      // This is required so that every execution of this instruction
+      // calls out to InterpreterRuntime::resolve_get_put to do
+      // additional, required work.
+      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
+      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
+      __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
+      __ movl(bc_reg, bc);
+      __ cmpl(temp_reg, (int) 0);
+      __ jcc(Assembler::zero, L_patch_done);  // don't patch
+    }
+    break;
+  default:
+    assert(byte_no == -1, "sanity");
+    // the pair bytecodes have already done the load.
+    if (load_bc_into_bc_reg) {
+      __ movl(bc_reg, bc);
+    }
   }
-  // the pair bytecodes have already done the load.
-  if (load_bc_into_scratch) {
-    __ movl(bc, bytecode);
-  }
-  Label patch_done;
+
   if (JvmtiExport::can_post_breakpoint()) {
-    Label fast_patch;
+    Label L_fast_patch;
     // if a breakpoint is present we can't rewrite the stream directly
-    __ movzbl(scratch, at_bcp(0));
-    __ cmpl(scratch, Bytecodes::_breakpoint);
-    __ jcc(Assembler::notEqual, fast_patch);
-    __ get_method(scratch);
+    __ movzbl(temp_reg, at_bcp(0));
+    __ cmpl(temp_reg, Bytecodes::_breakpoint);
+    __ jcc(Assembler::notEqual, L_fast_patch);
+    __ get_method(temp_reg);
     // Let breakpoint table handling rewrite to quicker bytecode
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, r13, bc);
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, r13, bc_reg);
 #ifndef ASSERT
-    __ jmpb(patch_done);
+    __ jmpb(L_patch_done);
 #else
-    __ jmp(patch_done);
+    __ jmp(L_patch_done);
 #endif
-    __ bind(fast_patch);
+    __ bind(L_fast_patch);
   }
+
 #ifdef ASSERT
-  Label okay;
-  __ load_unsigned_byte(scratch, at_bcp(0));
-  __ cmpl(scratch, (int) Bytecodes::java_code(bytecode));
-  __ jcc(Assembler::equal, okay);
-  __ cmpl(scratch, bc);
-  __ jcc(Assembler::equal, okay);
+  Label L_okay;
+  __ load_unsigned_byte(temp_reg, at_bcp(0));
+  __ cmpl(temp_reg, (int) Bytecodes::java_code(bc));
+  __ jcc(Assembler::equal, L_okay);
+  __ cmpl(temp_reg, bc_reg);
+  __ jcc(Assembler::equal, L_okay);
   __ stop("patching the wrong bytecode");
-  __ bind(okay);
+  __ bind(L_okay);
 #endif
+
   // patch bytecode
-  __ movb(at_bcp(0), bc);
-  __ bind(patch_done);
+  __ movb(at_bcp(0), bc_reg);
+  __ bind(L_patch_done);
 }
 
 
@@ -2098,24 +2126,20 @@
   assert_different_registers(result, Rcache, index, temp);
 
   Label resolved;
-  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
   if (byte_no == f1_oop) {
     // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
     // This kind of CP cache entry does not need to match the flags byte, because
     // there is a 1-1 relation between bytecode type and CP entry type.
     assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
+    __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
     __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
     __ testptr(result, result);
     __ jcc(Assembler::notEqual, resolved);
   } else {
     assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
     assert(result == noreg, "");  //else change code for setting result
-    const int shift_count = (1 + byte_no) * BitsPerByte;
-    __ movl(temp, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
-    __ shrl(temp, shift_count);
-    // have we resolved this bytecode?
-    __ andl(temp, 0xFF);
-    __ cmpl(temp, (int) bytecode());
+    __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
+    __ cmpl(temp, (int) bytecode());  // have we resolved this bytecode?
     __ jcc(Assembler::equal, resolved);
   }
 
@@ -2507,101 +2531,123 @@
   assert(btos == 0, "change code, btos != 0");
   __ andl(flags, 0x0f);
   __ jcc(Assembler::notZero, notByte);
+
   // btos
-  __ pop(btos);
-  if (!is_static) pop_and_check_object(obj);
-  __ movb(field, rax);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx);
+  {
+    __ pop(btos);
+    if (!is_static) pop_and_check_object(obj);
+    __ movb(field, rax);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
+    }
+    __ jmp(Done);
   }
-  __ jmp(Done);
 
   __ bind(notByte);
   __ cmpl(flags, atos);
   __ jcc(Assembler::notEqual, notObj);
+
   // atos
-  __ pop(atos);
-  if (!is_static) pop_and_check_object(obj);
-
-  // Store into the field
-  do_oop_store(_masm, field, rax, _bs->kind(), false);
-
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx);
+  {
+    __ pop(atos);
+    if (!is_static) pop_and_check_object(obj);
+    // Store into the field
+    do_oop_store(_masm, field, rax, _bs->kind(), false);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
+    }
+    __ jmp(Done);
   }
-  __ jmp(Done);
 
   __ bind(notObj);
   __ cmpl(flags, itos);
   __ jcc(Assembler::notEqual, notInt);
+
   // itos
-  __ pop(itos);
-  if (!is_static) pop_and_check_object(obj);
-  __ movl(field, rax);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx);
+  {
+    __ pop(itos);
+    if (!is_static) pop_and_check_object(obj);
+    __ movl(field, rax);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
+    }
+    __ jmp(Done);
   }
-  __ jmp(Done);
 
   __ bind(notInt);
   __ cmpl(flags, ctos);
   __ jcc(Assembler::notEqual, notChar);
+
   // ctos
-  __ pop(ctos);
-  if (!is_static) pop_and_check_object(obj);
-  __ movw(field, rax);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx);
+  {
+    __ pop(ctos);
+    if (!is_static) pop_and_check_object(obj);
+    __ movw(field, rax);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
+    }
+    __ jmp(Done);
   }
-  __ jmp(Done);
 
   __ bind(notChar);
   __ cmpl(flags, stos);
   __ jcc(Assembler::notEqual, notShort);
+
   // stos
-  __ pop(stos);
-  if (!is_static) pop_and_check_object(obj);
-  __ movw(field, rax);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx);
+  {
+    __ pop(stos);
+    if (!is_static) pop_and_check_object(obj);
+    __ movw(field, rax);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
+    }
+    __ jmp(Done);
   }
-  __ jmp(Done);
 
   __ bind(notShort);
   __ cmpl(flags, ltos);
   __ jcc(Assembler::notEqual, notLong);
+
   // ltos
-  __ pop(ltos);
-  if (!is_static) pop_and_check_object(obj);
-  __ movq(field, rax);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx);
+  {
+    __ pop(ltos);
+    if (!is_static) pop_and_check_object(obj);
+    __ movq(field, rax);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
+    }
+    __ jmp(Done);
   }
-  __ jmp(Done);
 
   __ bind(notLong);
   __ cmpl(flags, ftos);
   __ jcc(Assembler::notEqual, notFloat);
+
   // ftos
-  __ pop(ftos);
-  if (!is_static) pop_and_check_object(obj);
-  __ movflt(field, xmm0);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx);
+  {
+    __ pop(ftos);
+    if (!is_static) pop_and_check_object(obj);
+    __ movflt(field, xmm0);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
+    }
+    __ jmp(Done);
   }
-  __ jmp(Done);
 
   __ bind(notFloat);
 #ifdef ASSERT
   __ cmpl(flags, dtos);
   __ jcc(Assembler::notEqual, notDouble);
 #endif
+
   // dtos
-  __ pop(dtos);
-  if (!is_static) pop_and_check_object(obj);
-  __ movdbl(field, xmm0);
-  if (!is_static) {
-    patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx);
+  {
+    __ pop(dtos);
+    if (!is_static) pop_and_check_object(obj);
+    __ movdbl(field, xmm0);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
+    }
   }
 
 #ifdef ASSERT
@@ -2612,12 +2658,12 @@
 #endif
 
   __ bind(Done);
+
   // Check for volatile store
   __ testl(rdx, rdx);
   __ jcc(Assembler::zero, notVolatile);
   volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
                                                Assembler::StoreStore));
-
   __ bind(notVolatile);
 }
 
--- a/src/cpu/x86/vm/vm_version_x86.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/x86/vm/vm_version_x86.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -321,6 +321,20 @@
   if (UseSSE < 2) UseSSE = 2;
 #endif
 
+#ifdef AMD64
+  // flush_icache_stub has to be generated first.
+  // That is why the ICache line size is hard coded in the ICache class,
+  // see icache_x86.hpp. It is also the reason why we can't use the
+  // clflush instruction in the 32-bit VM, since it could be running
+  // on a CPU which does not support it.
+  //
+  // The only thing we can do is to verify that flushed
+  // ICache::line_size has correct value.
+  guarantee(_cpuid_info.std_cpuid1_edx.bits.clflush != 0, "clflush is not supported");
+  // clflush_size is the line size in quadwords (units of 8 bytes).
+  guarantee(_cpuid_info.std_cpuid1_ebx.bits.clflush_size == 8, "such clflush size is not supported");
+#endif
+
   // If the OS doesn't support SSE, we can't use this feature even if the HW does
   if (!os::supports_sse())
     _cpuFeatures &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4A|CPU_SSE4_1|CPU_SSE4_2);
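The two guarantees added above pin down the assumption baked into icache_x86.hpp: CPUID must report clflush support and a clflush line size of 8 quadwords (64 bytes). A standalone sketch of reading the same CPUID leaf with the GCC/Clang <cpuid.h> helper on an x86 host (an assumption; this is not how vm_version_x86.cpp gathers its cpuid info):

#include <cpuid.h>
#include <cstdio>

int main() {
  unsigned eax, ebx, ecx, edx;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) return 1;  // leaf 1: basic feature flags
  const bool     has_clflush = (edx >> 19) & 1;           // EDX bit 19: CLFLUSH supported
  const unsigned line_quads  = (ebx >> 8) & 0xFF;         // EBX bits 15:8: line size in quadwords
  std::printf("clflush supported: %d, line size: %u bytes\n",
              has_clflush, line_quads * 8);
  return (has_clflush && line_quads == 8) ? 0 : 2;        // 8 quadwords == 64-byte line
}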
@@ -543,14 +557,16 @@
   if( !supports_sse() && supports_3dnow_prefetch() ) AllocatePrefetchInstr = 3;
 
   // Allocation prefetch settings
-  intx cache_line_size = L1_data_cache_line_size();
+  intx cache_line_size = prefetch_data_size();
   if( cache_line_size > AllocatePrefetchStepSize )
     AllocatePrefetchStepSize = cache_line_size;
-  if( FLAG_IS_DEFAULT(AllocatePrefetchLines) )
-    AllocatePrefetchLines = 3; // Optimistic value
+
   assert(AllocatePrefetchLines > 0, "invalid value");
-  if( AllocatePrefetchLines < 1 ) // set valid value in product VM
-    AllocatePrefetchLines = 1; // Conservative value
+  if( AllocatePrefetchLines < 1 )     // set valid value in product VM
+    AllocatePrefetchLines = 3;
+  assert(AllocateInstancePrefetchLines > 0, "invalid value");
+  if( AllocateInstancePrefetchLines < 1 ) // set valid value in product VM
+    AllocateInstancePrefetchLines = 1;
 
   AllocatePrefetchDistance = allocate_prefetch_distance();
   AllocatePrefetchStyle    = allocate_prefetch_style();
@@ -587,10 +603,11 @@
     tty->print_cr("Logical CPUs per core: %u",
                   logical_processors_per_package());
     tty->print_cr("UseSSE=%d",UseSSE);
-    tty->print("Allocation: ");
+    tty->print("Allocation");
     if (AllocatePrefetchStyle <= 0 || UseSSE == 0 && !supports_3dnow_prefetch()) {
-      tty->print_cr("no prefetching");
+      tty->print_cr(": no prefetching");
     } else {
+      tty->print(" prefetching: ");
       if (UseSSE == 0 && supports_3dnow_prefetch()) {
         tty->print("PREFETCHW");
       } else if (UseSSE >= 1) {
@@ -605,9 +622,9 @@
         }
       }
       if (AllocatePrefetchLines > 1) {
-        tty->print_cr(" %d, %d lines with step %d bytes", AllocatePrefetchDistance, AllocatePrefetchLines, AllocatePrefetchStepSize);
+        tty->print_cr(" at distance %d, %d lines of %d bytes", AllocatePrefetchDistance, AllocatePrefetchLines, AllocatePrefetchStepSize);
       } else {
-        tty->print_cr(" %d, one line", AllocatePrefetchDistance);
+        tty->print_cr(" at distance %d, one line of %d bytes", AllocatePrefetchDistance, AllocatePrefetchStepSize);
       }
     }
 
--- a/src/cpu/x86/vm/vm_version_x86.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/x86/vm/vm_version_x86.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -91,7 +91,9 @@
                cmpxchg8 : 1,
                         : 6,
                cmov     : 1,
-                        : 7,
+                        : 3,
+               clflush  : 1,
+                        : 3,
                mmx      : 1,
                fxsr     : 1,
                sse      : 1,
@@ -417,7 +419,7 @@
     return result;
   }
 
-  static intx L1_data_cache_line_size()  {
+  static intx prefetch_data_size()  {
     intx result = 0;
     if (is_intel()) {
       result = (_cpuid_info.dcp_cpuid4_ebx.bits.L1_line_size + 1);
--- a/src/cpu/x86/vm/x86_32.ad	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/x86/vm/x86_32.ad	Thu Dec 22 15:46:11 2011 +0000
@@ -1369,7 +1369,12 @@
 //
 // NOTE: If the platform does not provide any short branch variants, then
 //       this method should return false for offset 0.
-bool Matcher::is_short_branch_offset(int rule, int offset) {
+bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
+  // The passed offset is relative to the address of the branch.
+  // On x86 a branch displacement is calculated relative to the address
+  // of the next instruction.
+  offset -= br_size;
+
   // the short version of jmpConUCF2 contains multiple branches,
   // making the reach slightly less
   if (rule == jmpConUCF2_rule)
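The extra br_size parameter exists because the matcher hands is_short_branch_offset an offset measured from the start of the branch, while an x86 rel8 displacement is relative to the end of the instruction. A standalone sketch of the resulting reach test; the helper name and the reduced bounds for the two-branch jmpConUCF2 pattern are illustrative, not the Matcher API.

#include <cassert>

static bool fits_short_branch(int offset_from_branch_start, int branch_size,
                              bool two_branch_pattern /* jmpConUCF2-like */) {
  const int disp = offset_from_branch_start - branch_size;  // make it next-instruction relative
  if (two_branch_pattern) {
    // the short form emits two jumps, so the usable reach is slightly smaller
    return -126 <= disp && disp <= 125;
  }
  return -128 <= disp && disp <= 127;                       // signed 8-bit displacement
}

int main() {
  assert(fits_short_branch(100, 2, false));    // 2-byte jcc rel8, target 100 bytes ahead: fits
  assert(!fits_short_branch(300, 2, false));   // too far for a rel8 form
  return 0;
}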
@@ -1713,18 +1718,6 @@
     else                               emit_d32(cbuf,con);
   %}
 
-  enc_class Lbl (label labl) %{ // JMP, CALL
-    Label *l = $labl$$label;
-    emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.insts_size()+4)) : 0);
-  %}
-
-  enc_class LblShort (label labl) %{ // JMP, CALL
-    Label *l = $labl$$label;
-    int disp = l ? (l->loc_pos() - (cbuf.insts_size()+1)) : 0;
-    assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
-    emit_d8(cbuf, disp);
-  %}
-
   enc_class OpcSReg (eRegI dst) %{    // BSWAP
     emit_cc(cbuf, $secondary, $dst$$reg );
   %}
@@ -1747,21 +1740,6 @@
     emit_rm(cbuf, 0x3, $secondary, $div$$reg );
   %}
 
-  enc_class Jcc (cmpOp cop, label labl) %{    // JCC
-    Label *l = $labl$$label;
-    $$$emit8$primary;
-    emit_cc(cbuf, $secondary, $cop$$cmpcode);
-    emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.insts_size()+4)) : 0);
-  %}
-
-  enc_class JccShort (cmpOp cop, label labl) %{    // JCC
-    Label *l = $labl$$label;
-    emit_cc(cbuf, $primary, $cop$$cmpcode);
-    int disp = l ? (l->loc_pos() - (cbuf.insts_size()+1)) : 0;
-    assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
-    emit_d8(cbuf, disp);
-  %}
-
   enc_class enc_cmov(cmpOp cop ) %{ // CMOV
     $$$emit8$primary;
     emit_cc(cbuf, $secondary, $cop$$cmpcode);
@@ -4496,7 +4474,6 @@
 //----------Instruction Attributes---------------------------------------------
 ins_attrib ins_cost(100);       // Required cost attribute
 ins_attrib ins_size(8);         // Required size attribute (in bits)
-ins_attrib ins_pc_relative(0);  // Required PC Relative flag
 ins_attrib ins_short_branch(0); // Required flag: is this instruction a
                                 // non-matching short branch variant of some
                                                             // long branch?
@@ -7348,8 +7325,9 @@
   ins_cost(100);
 
   format %{ "PREFETCHR $mem\t! Prefetch into level 1 cache for read" %}
-  opcode(0x0F, 0x0d);     /* Opcode 0F 0d /0 */
-  ins_encode(OpcP, OpcS, RMopc_Mem(0x00,mem));
+  ins_encode %{
+    __ prefetchr($mem$$Address);
+  %}
   ins_pipe(ialu_mem);
 %}
 
@@ -7359,8 +7337,9 @@
   ins_cost(100);
 
   format %{ "PREFETCHNTA $mem\t! Prefetch into non-temporal cache for read" %}
-  opcode(0x0F, 0x18);     /* Opcode 0F 18 /0 */
-  ins_encode(OpcP, OpcS, RMopc_Mem(0x00,mem));
+  ins_encode %{
+    __ prefetchnta($mem$$Address);
+  %}
   ins_pipe(ialu_mem);
 %}
 
@@ -7370,8 +7349,9 @@
   ins_cost(100);
 
   format %{ "PREFETCHT0 $mem\t! Prefetch into L1 and L2 caches for read" %}
-  opcode(0x0F, 0x18);     /* Opcode 0F 18 /1 */
-  ins_encode(OpcP, OpcS, RMopc_Mem(0x01,mem));
+  ins_encode %{
+    __ prefetcht0($mem$$Address);
+  %}
   ins_pipe(ialu_mem);
 %}
 
@@ -7381,8 +7361,9 @@
   ins_cost(100);
 
   format %{ "PREFETCHT2 $mem\t! Prefetch into L2 cache for read" %}
-  opcode(0x0F, 0x18);     /* Opcode 0F 18 /3 */
-  ins_encode(OpcP, OpcS, RMopc_Mem(0x03,mem));
+  ins_encode %{
+    __ prefetcht2($mem$$Address);
+  %}
   ins_pipe(ialu_mem);
 %}
 
@@ -7397,46 +7378,86 @@
 %}
 
 instruct prefetchw( memory mem ) %{
-  predicate(UseSSE==0 && VM_Version::supports_3dnow_prefetch() || AllocatePrefetchInstr==3);
+  predicate(UseSSE==0 && VM_Version::supports_3dnow_prefetch());
   match( PrefetchWrite mem );
   ins_cost(100);
 
   format %{ "PREFETCHW $mem\t! Prefetch into L1 cache and mark modified" %}
-  opcode(0x0F, 0x0D);     /* Opcode 0F 0D /1 */
-  ins_encode(OpcP, OpcS, RMopc_Mem(0x01,mem));
+  ins_encode %{
+    __ prefetchw($mem$$Address);
+  %}
   ins_pipe(ialu_mem);
 %}
 
 instruct prefetchwNTA( memory mem ) %{
-  predicate(UseSSE>=1 && AllocatePrefetchInstr==0);
+  predicate(UseSSE>=1);
   match(PrefetchWrite mem);
   ins_cost(100);
 
   format %{ "PREFETCHNTA $mem\t! Prefetch into non-temporal cache for write" %}
-  opcode(0x0F, 0x18);     /* Opcode 0F 18 /0 */
-  ins_encode(OpcP, OpcS, RMopc_Mem(0x00,mem));
+  ins_encode %{
+    __ prefetchnta($mem$$Address);
+  %}
   ins_pipe(ialu_mem);
 %}
 
-instruct prefetchwT0( memory mem ) %{
-  predicate(UseSSE>=1 && AllocatePrefetchInstr==1);
-  match(PrefetchWrite mem);
+// Prefetch instructions for allocation.
+
+instruct prefetchAlloc0( memory mem ) %{
+  predicate(UseSSE==0 && AllocatePrefetchInstr!=3);
+  match(PrefetchAllocation mem);
+  ins_cost(0);
+  size(0);
+  format %{ "Prefetch allocation (non-SSE is empty encoding)" %}
+  ins_encode();
+  ins_pipe(empty);
+%}
+
+instruct prefetchAlloc( memory mem ) %{
+  predicate(AllocatePrefetchInstr==3);
+  match( PrefetchAllocation mem );
   ins_cost(100);
 
-  format %{ "PREFETCHT0 $mem\t! Prefetch into L1 and L2 caches for write" %}
-  opcode(0x0F, 0x18);     /* Opcode 0F 18 /1 */
-  ins_encode(OpcP, OpcS, RMopc_Mem(0x01,mem));
+  format %{ "PREFETCHW $mem\t! Prefetch allocation into L1 cache and mark modified" %}
+  ins_encode %{
+    __ prefetchw($mem$$Address);
+  %}
+  ins_pipe(ialu_mem);
+%}
+
+instruct prefetchAllocNTA( memory mem ) %{
+  predicate(UseSSE>=1 && AllocatePrefetchInstr==0);
+  match(PrefetchAllocation mem);
+  ins_cost(100);
+
+  format %{ "PREFETCHNTA $mem\t! Prefetch allocation into non-temporal cache for write" %}
+  ins_encode %{
+    __ prefetchnta($mem$$Address);
+  %}
   ins_pipe(ialu_mem);
 %}
 
-instruct prefetchwT2( memory mem ) %{
-  predicate(UseSSE>=1 && AllocatePrefetchInstr==2);
-  match(PrefetchWrite mem);
+instruct prefetchAllocT0( memory mem ) %{
+  predicate(UseSSE>=1 && AllocatePrefetchInstr==1);
+  match(PrefetchAllocation mem);
   ins_cost(100);
 
-  format %{ "PREFETCHT2 $mem\t! Prefetch into L2 cache for write" %}
-  opcode(0x0F, 0x18);     /* Opcode 0F 18 /3 */
-  ins_encode(OpcP, OpcS, RMopc_Mem(0x03,mem));
+  format %{ "PREFETCHT0 $mem\t! Prefetch allocation into L1 and L2 caches for write" %}
+  ins_encode %{
+    __ prefetcht0($mem$$Address);
+  %}
+  ins_pipe(ialu_mem);
+%}
+
+instruct prefetchAllocT2( memory mem ) %{
+  predicate(UseSSE>=1 && AllocatePrefetchInstr==2);
+  match(PrefetchAllocation mem);
+  ins_cost(100);
+
+  format %{ "PREFETCHT2 $mem\t! Prefetch allocation into L2 cache for write" %}
+  ins_encode %{
+    __ prefetcht2($mem$$Address);
+  %}
   ins_pipe(ialu_mem);
 %}
 
@@ -7806,8 +7827,7 @@
 %}
 
 instruct membar_acquire_lock() %{
-  match(MemBarAcquire);
-  predicate(Matcher::prior_fast_lock(n));
+  match(MemBarAcquireLock);
   ins_cost(0);
 
   size(0);
@@ -7827,8 +7847,7 @@
 %}
 
 instruct membar_release_lock() %{
-  match(MemBarRelease);
-  predicate(Matcher::post_fast_unlock(n));
+  match(MemBarReleaseLock);
   ins_cost(0);
 
   size(0);
@@ -13047,7 +13066,6 @@
     Address index(noreg, $switch_val$$Register, Address::times_1);
     __ jump(ArrayAddress($constantaddress, index));
   %}
-  ins_pc_relative(1);
   ins_pipe(pipe_jmp);
 %}
 
@@ -13059,10 +13077,11 @@
   ins_cost(300);
   format %{ "JMP    $labl" %}
   size(5);
-  opcode(0xE9);
-  ins_encode( OpcP, Lbl( labl ) );
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ jmp(*L, false); // Always long jump
+  %}
   ins_pipe( pipe_jmp );
-  ins_pc_relative(1);
 %}
 
 // Jump Direct Conditional - Label defines a relative address from Jcc+1
@@ -13073,10 +13092,11 @@
   ins_cost(300);
   format %{ "J$cop    $labl" %}
   size(6);
-  opcode(0x0F, 0x80);
-  ins_encode( Jcc( cop, labl) );
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
+  %}
   ins_pipe( pipe_jcc );
-  ins_pc_relative(1);
 %}
 
 // Jump Direct Conditional - Label defines a relative address from Jcc+1
@@ -13087,10 +13107,11 @@
   ins_cost(300);
   format %{ "J$cop    $labl\t# Loop end" %}
   size(6);
-  opcode(0x0F, 0x80);
-  ins_encode( Jcc( cop, labl) );
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
+  %}
   ins_pipe( pipe_jcc );
-  ins_pc_relative(1);
 %}
 
 // Jump Direct Conditional - Label defines a relative address from Jcc+1
@@ -13101,10 +13122,11 @@
   ins_cost(300);
   format %{ "J$cop,u  $labl\t# Loop end" %}
   size(6);
-  opcode(0x0F, 0x80);
-  ins_encode( Jcc( cop, labl) );
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
+  %}
   ins_pipe( pipe_jcc );
-  ins_pc_relative(1);
 %}
 
 instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
@@ -13114,10 +13136,11 @@
   ins_cost(200);
   format %{ "J$cop,u  $labl\t# Loop end" %}
   size(6);
-  opcode(0x0F, 0x80);
-  ins_encode( Jcc( cop, labl) );
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
+  %}
   ins_pipe( pipe_jcc );
-  ins_pc_relative(1);
 %}
 
 // Jump Direct Conditional - using unsigned comparison
@@ -13128,10 +13151,11 @@
   ins_cost(300);
   format %{ "J$cop,u  $labl" %}
   size(6);
-  opcode(0x0F, 0x80);
-  ins_encode(Jcc(cop, labl));
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
+  %}
   ins_pipe(pipe_jcc);
-  ins_pc_relative(1);
 %}
 
 instruct jmpConUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
@@ -13141,10 +13165,11 @@
   ins_cost(200);
   format %{ "J$cop,u  $labl" %}
   size(6);
-  opcode(0x0F, 0x80);
-  ins_encode(Jcc(cop, labl));
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
+  %}
   ins_pipe(pipe_jcc);
-  ins_pc_relative(1);
 %}
 
 instruct jmpConUCF2(cmpOpUCF2 cop, eFlagsRegUCF cmp, label labl) %{
@@ -13162,31 +13187,21 @@
       $$emit$$"done:"
     }
   %}
-  size(12);
-  opcode(0x0F, 0x80);
   ins_encode %{
     Label* l = $labl$$label;
-    $$$emit8$primary;
-    emit_cc(cbuf, $secondary, Assembler::parity);
-    int parity_disp = -1;
-    bool ok = false;
     if ($cop$$cmpcode == Assembler::notEqual) {
-       // the two jumps 6 bytes apart so the jump distances are too
-       parity_disp = l ? (l->loc_pos() - (cbuf.insts_size() + 4)) : 0;
+      __ jcc(Assembler::parity, *l, false);
+      __ jcc(Assembler::notEqual, *l, false);
     } else if ($cop$$cmpcode == Assembler::equal) {
-       parity_disp = 6;
-       ok = true;
+      Label done;
+      __ jccb(Assembler::parity, done);
+      __ jcc(Assembler::equal, *l, false);
+      __ bind(done);
     } else {
        ShouldNotReachHere();
     }
-    emit_d32(cbuf, parity_disp);
-    $$$emit8$primary;
-    emit_cc(cbuf, $secondary, $cop$$cmpcode);
-    int disp = l ? (l->loc_pos() - (cbuf.insts_size() + 4)) : 0;
-    emit_d32(cbuf, disp);
   %}
   ins_pipe(pipe_jcc);
-  ins_pc_relative(1);
 %}
 
 // ============================================================================
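The jmpConUCF2 encodings above (and the short-branch variant later in this file) have to preserve Java's floating-point comparison semantics when the compare is unordered, i.e. when a NaN operand leaves the parity flag set: "not equal" must still be taken and "equal" must be skipped. A standalone sketch of just that semantic requirement:

#include <cassert>
#include <cmath>

int main() {
  const double a = std::nan(""), b = 1.0;
  assert(!(a == b));   // unordered compare: the "equal" branch must not be taken
  assert(a != b);      // unordered compare: the "not equal" branch must be taken
  return 0;
}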
@@ -13251,10 +13266,11 @@
   ins_cost(300);
   format %{ "JMP,s  $labl" %}
   size(2);
-  opcode(0xEB);
-  ins_encode( OpcP, LblShort( labl ) );
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ jmpb(*L);
+  %}
   ins_pipe( pipe_jmp );
-  ins_pc_relative(1);
   ins_short_branch(1);
 %}
 
@@ -13266,10 +13282,11 @@
   ins_cost(300);
   format %{ "J$cop,s  $labl" %}
   size(2);
-  opcode(0x70);
-  ins_encode( JccShort( cop, labl) );
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
+  %}
   ins_pipe( pipe_jcc );
-  ins_pc_relative(1);
   ins_short_branch(1);
 %}
 
@@ -13281,10 +13298,11 @@
   ins_cost(300);
   format %{ "J$cop,s  $labl\t# Loop end" %}
   size(2);
-  opcode(0x70);
-  ins_encode( JccShort( cop, labl) );
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
+  %}
   ins_pipe( pipe_jcc );
-  ins_pc_relative(1);
   ins_short_branch(1);
 %}
 
@@ -13296,10 +13314,11 @@
   ins_cost(300);
   format %{ "J$cop,us $labl\t# Loop end" %}
   size(2);
-  opcode(0x70);
-  ins_encode( JccShort( cop, labl) );
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
+  %}
   ins_pipe( pipe_jcc );
-  ins_pc_relative(1);
   ins_short_branch(1);
 %}
 
@@ -13310,10 +13329,11 @@
   ins_cost(300);
   format %{ "J$cop,us $labl\t# Loop end" %}
   size(2);
-  opcode(0x70);
-  ins_encode( JccShort( cop, labl) );
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
+  %}
   ins_pipe( pipe_jcc );
-  ins_pc_relative(1);
   ins_short_branch(1);
 %}
 
@@ -13325,10 +13345,11 @@
   ins_cost(300);
   format %{ "J$cop,us $labl" %}
   size(2);
-  opcode(0x70);
-  ins_encode( JccShort( cop, labl) );
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
+  %}
   ins_pipe( pipe_jcc );
-  ins_pc_relative(1);
   ins_short_branch(1);
 %}
 
@@ -13339,10 +13360,11 @@
   ins_cost(300);
   format %{ "J$cop,us $labl" %}
   size(2);
-  opcode(0x70);
-  ins_encode( JccShort( cop, labl) );
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
+  %}
   ins_pipe( pipe_jcc );
-  ins_pc_relative(1);
   ins_short_branch(1);
 %}
 
@@ -13362,27 +13384,21 @@
     }
   %}
   size(4);
-  opcode(0x70);
   ins_encode %{
     Label* l = $labl$$label;
-    emit_cc(cbuf, $primary, Assembler::parity);
-    int parity_disp = -1;
     if ($cop$$cmpcode == Assembler::notEqual) {
-      parity_disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0;
+      __ jccb(Assembler::parity, *l);
+      __ jccb(Assembler::notEqual, *l);
     } else if ($cop$$cmpcode == Assembler::equal) {
-      parity_disp = 2;
+      Label done;
+      __ jccb(Assembler::parity, done);
+      __ jccb(Assembler::equal, *l);
+      __ bind(done);
     } else {
-      ShouldNotReachHere();
-    }
-    emit_d8(cbuf, parity_disp);
-    emit_cc(cbuf, $primary, $cop$$cmpcode);
-    int disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0;
-    emit_d8(cbuf, disp);
-    assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
-    assert(-128 <= parity_disp && parity_disp <= 127, "Displacement too large for short jmp");
+       ShouldNotReachHere();
+    }
   %}
   ins_pipe(pipe_jcc);
-  ins_pc_relative(1);
   ins_short_branch(1);
 %}
 
@@ -13855,7 +13871,6 @@
               call_epilog,
               post_call_FPU );
   ins_pipe( pipe_slow );
-  ins_pc_relative(1);
   ins_alignment(4);
 %}
 
@@ -13879,7 +13894,6 @@
               call_epilog,
               post_call_FPU );
   ins_pipe( pipe_slow );
-  ins_pc_relative(1);
   ins_alignment(4);
 %}
 
@@ -13899,7 +13913,6 @@
               call_epilog,
               post_call_FPU );
   ins_pipe( pipe_slow );
-  ins_pc_relative(1);
   ins_alignment(4);
 %}
 
@@ -13917,7 +13930,6 @@
               Java_To_Runtime( meth ),
               post_call_FPU );
   ins_pipe( pipe_slow );
-  ins_pc_relative(1);
 %}
 
 // Call runtime without safepoint
@@ -13933,7 +13945,6 @@
               Java_To_Runtime( meth ),
               Verify_FPU_For_Leaf, post_call_FPU );
   ins_pipe( pipe_slow );
-  ins_pc_relative(1);
 %}
 
 instruct CallLeafNoFPDirect(method meth) %{
@@ -13945,7 +13956,6 @@
   opcode(0xE8); /* E8 cd */
   ins_encode(Java_To_Runtime(meth));
   ins_pipe( pipe_slow );
-  ins_pc_relative(1);
 %}
 
 
@@ -14024,7 +14034,6 @@
   format %{ "FASTLOCK $object, $box KILLS $tmp,$scr" %}
   ins_encode( Fast_Lock(object,box,tmp,scr) );
   ins_pipe( pipe_slow );
-  ins_pc_relative(1);
 %}
 
 instruct cmpFastUnlock( eFlagsReg cr, eRegP object, eAXRegP box, eRegP tmp ) %{
@@ -14034,7 +14043,6 @@
   format %{ "FASTUNLOCK $object, $box, $tmp" %}
   ins_encode( Fast_Unlock(object,box,tmp) );
   ins_pipe( pipe_slow );
-  ins_pc_relative(1);
 %}
 
 
--- a/src/cpu/x86/vm/x86_64.ad	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/x86/vm/x86_64.ad	Thu Dec 22 15:46:11 2011 +0000
@@ -1966,7 +1966,12 @@
 //
 // NOTE: If the platform does not provide any short branch variants, then
 //       this method should return false for offset 0.
-bool Matcher::is_short_branch_offset(int rule, int offset) {
+bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
+  // The passed offset is relative to the address of the branch.
+  // On x86 a branch displacement is calculated relative to the address
+  // of the next instruction.
+  offset -= br_size;
+
   // the short version of jmpConUCF2 contains multiple branches,
   // making the reach slightly less
   if (rule == jmpConUCF2_rule)
@@ -2426,22 +2431,6 @@
     }
   %}
 
-  enc_class Lbl(label labl)
-  %{
-    // JMP, CALL
-    Label* l = $labl$$label;
-    emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.insts_size() + 4)) : 0);
-  %}
-
-  enc_class LblShort(label labl)
-  %{
-    // JMP, CALL
-    Label* l = $labl$$label;
-    int disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0;
-    assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
-    emit_d8(cbuf, disp);
-  %}
-
   enc_class opc2_reg(rRegI dst)
   %{
     // BSWAP
@@ -2460,25 +2449,6 @@
     emit_rm(cbuf, 0x3, $secondary, $div$$reg & 7);
   %}
 
-  enc_class Jcc(cmpOp cop, label labl)
-  %{
-    // JCC
-    Label* l = $labl$$label;
-    $$$emit8$primary;
-    emit_cc(cbuf, $secondary, $cop$$cmpcode);
-    emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.insts_size() + 4)) : 0);
-  %}
-
-  enc_class JccShort (cmpOp cop, label labl)
-  %{
-  // JCC
-    Label *l = $labl$$label;
-    emit_cc(cbuf, $primary, $cop$$cmpcode);
-    int disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0;
-    assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
-    emit_d8(cbuf, disp);
-  %}
-
   enc_class enc_cmov(cmpOp cop)
   %{
     // CMOV
@@ -4013,7 +3983,6 @@
 //----------Instruction Attributes---------------------------------------------
 ins_attrib ins_cost(100);       // Required cost attribute
 ins_attrib ins_size(8);         // Required size attribute (in bits)
-ins_attrib ins_pc_relative(0);  // Required PC Relative flag
 ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                 // a non-matching short branch variant
                                 // of some long branch?
@@ -6648,8 +6617,9 @@
   ins_cost(125);
 
   format %{ "PREFETCHR $mem\t# Prefetch into level 1 cache" %}
-  opcode(0x0F, 0x0D);     /* Opcode 0F 0D /0 */
-  ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x00, mem));
+  ins_encode %{
+    __ prefetchr($mem$$Address);
+  %}
   ins_pipe(ialu_mem);
 %}
 
@@ -6659,8 +6629,9 @@
   ins_cost(125);
 
   format %{ "PREFETCHNTA $mem\t# Prefetch into non-temporal cache for read" %}
-  opcode(0x0F, 0x18);     /* Opcode 0F 18 /0 */
-  ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x00, mem));
+  ins_encode %{
+    __ prefetchnta($mem$$Address);
+  %}
   ins_pipe(ialu_mem);
 %}
 
@@ -6670,8 +6641,9 @@
   ins_cost(125);
 
   format %{ "PREFETCHT0 $mem\t# prefetch into L1 and L2 caches for read" %}
-  opcode(0x0F, 0x18); /* Opcode 0F 18 /1 */
-  ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x01, mem));
+  ins_encode %{
+    __ prefetcht0($mem$$Address);
+  %}
   ins_pipe(ialu_mem);
 %}
 
@@ -6681,52 +6653,70 @@
   ins_cost(125);
 
   format %{ "PREFETCHT2 $mem\t# prefetch into L2 caches for read" %}
-  opcode(0x0F, 0x18); /* Opcode 0F 18 /3 */
-  ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x03, mem));
-  ins_pipe(ialu_mem);
-%}
-
-instruct prefetchw( memory mem ) %{
-  predicate(AllocatePrefetchInstr==3);
-  match(PrefetchWrite mem);
-  ins_cost(125);
-
-  format %{ "PREFETCHW $mem\t# Prefetch into level 1 cache and mark modified" %}
-  opcode(0x0F, 0x0D);     /* Opcode 0F 0D /1 */
-  ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x01, mem));
+  ins_encode %{
+    __ prefetcht2($mem$$Address);
+  %}
   ins_pipe(ialu_mem);
 %}
 
 instruct prefetchwNTA( memory mem ) %{
-  predicate(AllocatePrefetchInstr==0);
   match(PrefetchWrite mem);
   ins_cost(125);
 
   format %{ "PREFETCHNTA $mem\t# Prefetch to non-temporal cache for write" %}
-  opcode(0x0F, 0x18);     /* Opcode 0F 18 /0 */
-  ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x00, mem));
+  ins_encode %{
+    __ prefetchnta($mem$$Address);
+  %}
   ins_pipe(ialu_mem);
 %}
 
-instruct prefetchwT0( memory mem ) %{
-  predicate(AllocatePrefetchInstr==1);
-  match(PrefetchWrite mem);
+// Prefetch instructions for allocation.
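+// AllocatePrefetchInstr selects which variant below is matched:
+//   0 = prefetchnta, 1 = prefetcht0, 2 = prefetcht2, 3 = prefetchw.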
+
+instruct prefetchAlloc( memory mem ) %{
+  predicate(AllocatePrefetchInstr==3);
+  match(PrefetchAllocation mem);
+  ins_cost(125);
+
+  format %{ "PREFETCHW $mem\t# Prefetch allocation into level 1 cache and mark modified" %}
+  ins_encode %{
+    __ prefetchw($mem$$Address);
+  %}
+  ins_pipe(ialu_mem);
+%}
+
+instruct prefetchAllocNTA( memory mem ) %{
+  predicate(AllocatePrefetchInstr==0);
+  match(PrefetchAllocation mem);
   ins_cost(125);
 
-  format %{ "PREFETCHT0 $mem\t# Prefetch to level 1 and 2 caches for write" %}
-  opcode(0x0F, 0x18);     /* Opcode 0F 18 /1 */
-  ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x01, mem));
+  format %{ "PREFETCHNTA $mem\t# Prefetch allocation to non-temporal cache for write" %}
+  ins_encode %{
+    __ prefetchnta($mem$$Address);
+  %}
   ins_pipe(ialu_mem);
 %}
 
-instruct prefetchwT2( memory mem ) %{
-  predicate(AllocatePrefetchInstr==2);
-  match(PrefetchWrite mem);
+instruct prefetchAllocT0( memory mem ) %{
+  predicate(AllocatePrefetchInstr==1);
+  match(PrefetchAllocation mem);
   ins_cost(125);
 
-  format %{ "PREFETCHT2 $mem\t# Prefetch to level 2 cache for write" %}
-  opcode(0x0F, 0x18);     /* Opcode 0F 18 /3 */
-  ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x03, mem));
+  format %{ "PREFETCHT0 $mem\t# Prefetch allocation to level 1 and 2 caches for write" %}
+  ins_encode %{
+    __ prefetcht0($mem$$Address);
+  %}
+  ins_pipe(ialu_mem);
+%}
+
+instruct prefetchAllocT2( memory mem ) %{
+  predicate(AllocatePrefetchInstr==2);
+  match(PrefetchAllocation mem);
+  ins_cost(125);
+
+  format %{ "PREFETCHT2 $mem\t# Prefetch allocation to level 2 cache for write" %}
+  ins_encode %{
+    __ prefetcht2($mem$$Address);
+  %}
   ins_pipe(ialu_mem);
 %}
 
@@ -7376,8 +7366,7 @@
 
 instruct membar_acquire_lock()
 %{
-  match(MemBarAcquire);
-  predicate(Matcher::prior_fast_lock(n));
+  match(MemBarAcquireLock);
   ins_cost(0);
 
   size(0);
@@ -7399,8 +7388,7 @@
 
 instruct membar_release_lock()
 %{
-  match(MemBarRelease);
-  predicate(Matcher::post_fast_unlock(n));
+  match(MemBarReleaseLock);
   ins_cost(0);
 
   size(0);
@@ -7547,7 +7535,6 @@
     __ jmp(dispatch);
   %}
   ins_pipe(pipe_jmp);
-  ins_pc_relative(1);
 %}
 
 instruct jumpXtnd_addr(rRegL switch_val, immI2 shift, immL32 offset, rRegI dest) %{
@@ -7568,7 +7555,6 @@
     __ jmp(dispatch);
   %}
   ins_pipe(pipe_jmp);
-  ins_pc_relative(1);
 %}
 
 instruct jumpXtnd(rRegL switch_val, rRegI dest) %{
@@ -7589,7 +7575,6 @@
     __ jmp(dispatch);
   %}
   ins_pipe(pipe_jmp);
-  ins_pc_relative(1);
 %}
 
 // Conditional move
@@ -12017,10 +12002,11 @@
   ins_cost(300);
   format %{ "jmp     $labl" %}
   size(5);
-  opcode(0xE9);
-  ins_encode(OpcP, Lbl(labl));
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ jmp(*L, false); // Always long jump
+  %}
   ins_pipe(pipe_jmp);
-  ins_pc_relative(1);
 %}
 
 // Jump Direct Conditional - Label defines a relative address from Jcc+1
@@ -12032,10 +12018,11 @@
   ins_cost(300);
   format %{ "j$cop     $labl" %}
   size(6);
-  opcode(0x0F, 0x80);
-  ins_encode(Jcc(cop, labl));
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
+  %}
   ins_pipe(pipe_jcc);
-  ins_pc_relative(1);
 %}
 
 // Jump Direct Conditional - Label defines a relative address from Jcc+1
@@ -12047,10 +12034,11 @@
   ins_cost(300);
   format %{ "j$cop     $labl\t# loop end" %}
   size(6);
-  opcode(0x0F, 0x80);
-  ins_encode(Jcc(cop, labl));
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
+  %}
   ins_pipe(pipe_jcc);
-  ins_pc_relative(1);
 %}
 
 // Jump Direct Conditional - Label defines a relative address from Jcc+1
@@ -12061,10 +12049,11 @@
   ins_cost(300);
   format %{ "j$cop,u   $labl\t# loop end" %}
   size(6);
-  opcode(0x0F, 0x80);
-  ins_encode(Jcc(cop, labl));
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
+  %}
   ins_pipe(pipe_jcc);
-  ins_pc_relative(1);
 %}
 
 instruct jmpLoopEndUCF(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{
@@ -12074,10 +12063,11 @@
   ins_cost(200);
   format %{ "j$cop,u   $labl\t# loop end" %}
   size(6);
-  opcode(0x0F, 0x80);
-  ins_encode(Jcc(cop, labl));
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
+  %}
   ins_pipe(pipe_jcc);
-  ins_pc_relative(1);
 %}
 
 // Jump Direct Conditional - using unsigned comparison
@@ -12088,10 +12078,11 @@
   ins_cost(300);
   format %{ "j$cop,u  $labl" %}
   size(6);
-  opcode(0x0F, 0x80);
-  ins_encode(Jcc(cop, labl));
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
+  %}
   ins_pipe(pipe_jcc);
-  ins_pc_relative(1);
 %}
 
 instruct jmpConUCF(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{
@@ -12101,10 +12092,11 @@
   ins_cost(200);
   format %{ "j$cop,u  $labl" %}
   size(6);
-  opcode(0x0F, 0x80);
-  ins_encode(Jcc(cop, labl));
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
+  %}
   ins_pipe(pipe_jcc);
-  ins_pc_relative(1);
 %}
 
 instruct jmpConUCF2(cmpOpUCF2 cop, rFlagsRegUCF cmp, label labl) %{
@@ -12122,29 +12114,21 @@
       $$emit$$"done:"
     }
   %}
-  size(12);
-  opcode(0x0F, 0x80);
   ins_encode %{
     Label* l = $labl$$label;
-    $$$emit8$primary;
-    emit_cc(cbuf, $secondary, Assembler::parity);
-    int parity_disp = -1;
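+    // Parity signals an unordered FP compare: notEqual also branches on
+    // parity, while equal jumps over its branch when parity is set.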
     if ($cop$$cmpcode == Assembler::notEqual) {
-       // the two jumps 6 bytes apart so the jump distances are too
-       parity_disp = l ? (l->loc_pos() - (cbuf.insts_size() + 4)) : 0;
+      __ jcc(Assembler::parity, *l, false);
+      __ jcc(Assembler::notEqual, *l, false);
     } else if ($cop$$cmpcode == Assembler::equal) {
-       parity_disp = 6;
+      Label done;
+      __ jccb(Assembler::parity, done);
+      __ jcc(Assembler::equal, *l, false);
+      __ bind(done);
     } else {
        ShouldNotReachHere();
     }
-    emit_d32(cbuf, parity_disp);
-    $$$emit8$primary;
-    emit_cc(cbuf, $secondary, $cop$$cmpcode);
-    int disp = l ? (l->loc_pos() - (cbuf.insts_size() + 4)) : 0;
-    emit_d32(cbuf, disp);
   %}
   ins_pipe(pipe_jcc);
-  ins_pc_relative(1);
 %}
 
 // ============================================================================
@@ -12218,10 +12202,11 @@
   ins_cost(300);
   format %{ "jmp,s   $labl" %}
   size(2);
-  opcode(0xEB);
-  ins_encode(OpcP, LblShort(labl));
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ jmpb(*L);
+  %}
   ins_pipe(pipe_jmp);
-  ins_pc_relative(1);
   ins_short_branch(1);
 %}
 
@@ -12233,10 +12218,11 @@
   ins_cost(300);
   format %{ "j$cop,s   $labl" %}
   size(2);
-  opcode(0x70);
-  ins_encode(JccShort(cop, labl));
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
+  %}
   ins_pipe(pipe_jcc);
-  ins_pc_relative(1);
   ins_short_branch(1);
 %}
 
@@ -12248,10 +12234,11 @@
   ins_cost(300);
   format %{ "j$cop,s   $labl\t# loop end" %}
   size(2);
-  opcode(0x70);
-  ins_encode(JccShort(cop, labl));
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
+  %}
   ins_pipe(pipe_jcc);
-  ins_pc_relative(1);
   ins_short_branch(1);
 %}
 
@@ -12263,10 +12250,11 @@
   ins_cost(300);
   format %{ "j$cop,us  $labl\t# loop end" %}
   size(2);
-  opcode(0x70);
-  ins_encode(JccShort(cop, labl));
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
+  %}
   ins_pipe(pipe_jcc);
-  ins_pc_relative(1);
   ins_short_branch(1);
 %}
 
@@ -12277,10 +12265,11 @@
   ins_cost(300);
   format %{ "j$cop,us  $labl\t# loop end" %}
   size(2);
-  opcode(0x70);
-  ins_encode(JccShort(cop, labl));
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
+  %}
   ins_pipe(pipe_jcc);
-  ins_pc_relative(1);
   ins_short_branch(1);
 %}
 
@@ -12292,10 +12281,11 @@
   ins_cost(300);
   format %{ "j$cop,us  $labl" %}
   size(2);
-  opcode(0x70);
-  ins_encode(JccShort(cop, labl));
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
+  %}
   ins_pipe(pipe_jcc);
-  ins_pc_relative(1);
   ins_short_branch(1);
 %}
 
@@ -12306,10 +12296,11 @@
   ins_cost(300);
   format %{ "j$cop,us  $labl" %}
   size(2);
-  opcode(0x70);
-  ins_encode(JccShort(cop, labl));
+  ins_encode %{
+    Label* L = $labl$$label;
+    __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
+  %}
   ins_pipe(pipe_jcc);
-  ins_pc_relative(1);
   ins_short_branch(1);
 %}
 
@@ -12329,27 +12320,21 @@
     }
   %}
   size(4);
-  opcode(0x70);
   ins_encode %{
     Label* l = $labl$$label;
-    emit_cc(cbuf, $primary, Assembler::parity);
-    int parity_disp = -1;
     if ($cop$$cmpcode == Assembler::notEqual) {
-      parity_disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0;
+      __ jccb(Assembler::parity, *l);
+      __ jccb(Assembler::notEqual, *l);
     } else if ($cop$$cmpcode == Assembler::equal) {
-      parity_disp = 2;
+      Label done;
+      __ jccb(Assembler::parity, done);
+      __ jccb(Assembler::equal, *l);
+      __ bind(done);
     } else {
-      ShouldNotReachHere();
-    }
-    emit_d8(cbuf, parity_disp);
-    emit_cc(cbuf, $primary, $cop$$cmpcode);
-    int disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0;
-    emit_d8(cbuf, disp);
-    assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
-    assert(-128 <= parity_disp && parity_disp <= 127, "Displacement too large for short jmp");
+      ShouldNotReachHere();
+    }
   %}
   ins_pipe(pipe_jcc);
-  ins_pc_relative(1);
   ins_short_branch(1);
 %}
 
@@ -12366,7 +12351,6 @@
   format %{ "fastlock $object,$box,$tmp,$scr" %}
   ins_encode(Fast_Lock(object, box, tmp, scr));
   ins_pipe(pipe_slow);
-  ins_pc_relative(1);
 %}
 
 instruct cmpFastUnlock(rFlagsReg cr,
@@ -12379,7 +12363,6 @@
   format %{ "fastunlock $object, $box, $tmp" %}
   ins_encode(Fast_Unlock(object, box, tmp));
   ins_pipe(pipe_slow);
-  ins_pc_relative(1);
 %}
 
 
@@ -12432,7 +12415,6 @@
   opcode(0xE8); /* E8 cd */
   ins_encode(Java_Static_Call(meth), call_epilog);
   ins_pipe(pipe_slow);
-  ins_pc_relative(1);
   ins_alignment(4);
 %}
 
@@ -12454,7 +12436,6 @@
              restore_SP,
              call_epilog);
   ins_pipe(pipe_slow);
-  ins_pc_relative(1);
   ins_alignment(4);
 %}
 
@@ -12472,7 +12453,6 @@
   opcode(0xE8); /* E8 cd */
   ins_encode(Java_Dynamic_Call(meth), call_epilog);
   ins_pipe(pipe_slow);
-  ins_pc_relative(1);
   ins_alignment(4);
 %}
 
@@ -12487,7 +12467,6 @@
   opcode(0xE8); /* E8 cd */
   ins_encode(Java_To_Runtime(meth));
   ins_pipe(pipe_slow);
-  ins_pc_relative(1);
 %}
 
 // Call runtime without safepoint
@@ -12501,7 +12480,6 @@
   opcode(0xE8); /* E8 cd */
   ins_encode(Java_To_Runtime(meth));
   ins_pipe(pipe_slow);
-  ins_pc_relative(1);
 %}
 
 // Call runtime without safepoint
@@ -12515,7 +12493,6 @@
   opcode(0xE8); /* E8 cd */
   ins_encode(Java_To_Runtime(meth));
   ins_pipe(pipe_slow);
-  ins_pc_relative(1);
 %}
 
 // Return Instruction
--- a/src/cpu/zero/vm/frame_zero.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/zero/vm/frame_zero.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
+ * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -425,3 +425,9 @@
 }
 
 #endif
+
+intptr_t *frame::initial_deoptimization_info() {
+  // unused... but returns fp() to minimize changes introduced by 7087445
+  return fp();
+}
+
--- a/src/cpu/zero/vm/methodHandles_zero.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/zero/vm/methodHandles_zero.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -25,12 +25,15 @@
 
 
 // Adapters
-// just copied from x64 for now.
 enum /* platform_dependent_constants */ {
-  adapter_code_size = NOT_LP64(30000 DEBUG_ONLY(+ 10000)) LP64_ONLY(80000 DEBUG_ONLY(+ 120000))
+  adapter_code_size = 0
 };
 
-#define TARGET_ARCH_NYI_6939861 
+#define TARGET_ARCH_NYI_6939861 1
 // ..#ifdef TARGET_ARCH_NYI_6939861
 // ..  // Here are some backward compatible declarations until the 6939861 ports are updated.
 // ..  #define _adapter_flyby    (_EK_LIMIT + 10)
@@ -70,4 +73,3 @@
 // ..
 // ..  static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
 // ..#endif //TARGET_ARCH_NYI_6939861
-
--- a/src/cpu/zero/vm/sharedRuntime_zero.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/zero/vm/sharedRuntime_zero.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -46,17 +46,6 @@
 #include "shark/sharkCompiler.hpp"
 #endif
 
-#ifdef notdef
-DeoptimizationBlob *SharedRuntime::_deopt_blob;
-SafepointBlob      *SharedRuntime::_polling_page_safepoint_handler_blob;
-SafepointBlob      *SharedRuntime::_polling_page_return_handler_blob;
-RuntimeStub        *SharedRuntime::_wrong_method_blob;
-RuntimeStub        *SharedRuntime::_ic_miss_blob;
-RuntimeStub        *SharedRuntime::_resolve_opt_virtual_call_blob;
-RuntimeStub        *SharedRuntime::_resolve_virtual_call_blob;
-RuntimeStub        *SharedRuntime::_resolve_static_call_blob;
-#endif
-
 
 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                            VMRegPair *regs,
@@ -122,7 +111,6 @@
   return DeoptimizationBlob::create(&buffer, NULL, 0, 0, 0, 0);
 }
 
-
 void SharedRuntime::generate_deopt_blob() {
   _deopt_blob = generate_empty_deopt_blob();
 }
@@ -134,25 +122,6 @@
 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
   return generate_empty_runtime_stub("resolve_blob");
 }
-#ifdef notdef
-void SharedRuntime::generate_stubs() {
-  _wrong_method_blob =
-    generate_empty_runtime_stub("wrong_method_stub");
-  _ic_miss_blob =
-    generate_empty_runtime_stub("ic_miss_stub");
-  _resolve_opt_virtual_call_blob =
-    generate_empty_runtime_stub("resolve_opt_virtual_call");
-  _resolve_virtual_call_blob =
-    generate_empty_runtime_stub("resolve_virtual_call");
-  _resolve_static_call_blob =
-    generate_empty_runtime_stub("resolve_static_call");
-
-  _polling_page_safepoint_handler_blob =
-    generate_empty_safepoint_blob();
-  _polling_page_return_handler_blob =
-    generate_empty_safepoint_blob();
-}
-#endif
 
 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                          VMRegPair *regs,
--- a/src/cpu/zero/vm/stack_zero.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/zero/vm/stack_zero.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/cpu/zero/vm/stubGenerator_zero.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/cpu/zero/vm/stubGenerator_zero.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -215,12 +215,6 @@
     StubRoutines::_throw_AbstractMethodError_entry =
       ShouldNotCallThisStub();
 
-    StubRoutines::_throw_ArithmeticException_entry =
-      ShouldNotCallThisStub();
-
-    StubRoutines::_throw_NullPointerException_entry =
-      ShouldNotCallThisStub();
-
     StubRoutines::_throw_NullPointerException_at_call_entry =
       ShouldNotCallThisStub();
 
--- a/src/os/linux/vm/os_linux.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/os/linux/vm/os_linux.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -169,7 +169,35 @@
 /* Used to protect dlsym() calls */
 static pthread_mutex_t dl_mutex;
 
-////////////////////////////////////////////////////////////////////////////////
+#ifdef JAVASE_EMBEDDED
+class MemNotifyThread: public Thread {
+  friend class VMStructs;
+ public:
+  virtual void run();
+
+ private:
+  static MemNotifyThread* _memnotify_thread;
+  int _fd;
+
+ public:
+
+  // Constructor
+  MemNotifyThread(int fd);
+
+  // Tester
+  bool is_memnotify_thread() const { return true; }
+
+  // Printing
+  char* name() const { return (char*)"Linux MemNotify Thread"; }
+
+  // Returns the single instance of the MemNotifyThread
+  static MemNotifyThread* memnotify_thread() { return _memnotify_thread; }
+
+  // Create and start the single instance of MemNotifyThread
+  static void start();
+};
+#endif // JAVASE_EMBEDDED
+
 // utility functions
 
 static int SR_initialize();
@@ -2120,6 +2148,14 @@
   st->cr();
 }
 
+void os::pd_print_cpu_info(outputStream* st) {
+  st->print("\n/proc/cpuinfo:\n");
+  if (!_print_ascii_file("/proc/cpuinfo", st)) {
+    st->print("  <Not Available>");
+  }
+  st->cr();
+}
+
 void os::print_memory_info(outputStream* st) {
 
   st->print("Memory:");
@@ -2497,7 +2533,13 @@
   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
   uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
                                    MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
-  return res != (uintptr_t) MAP_FAILED;
+  if (res != (uintptr_t) MAP_FAILED) {
+    if (UseNUMAInterleaving) {
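+      // numa_make_global() interleaves the newly committed pages
+      // across all NUMA nodes.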
+      numa_make_global(addr, size);
+    }
+    return true;
+  }
+  return false;
 }
 
 // Define MAP_HUGETLB here so we can build HotSpot on old systems.
@@ -2518,10 +2560,20 @@
       (uintptr_t) ::mmap(addr, size, prot,
                          MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS|MAP_HUGETLB,
                          -1, 0);
-    return res != (uintptr_t) MAP_FAILED;
-  }
-
-  return commit_memory(addr, size, exec);
+    if (res != (uintptr_t) MAP_FAILED) {
+      if (UseNUMAInterleaving) {
+        numa_make_global(addr, size);
+      }
+      return true;
+    }
+    // Fall through and try to use small pages
+  }
+
+  if (commit_memory(addr, size, exec)) {
+    realign_memory(addr, size, alignment_hint);
+    return true;
+  }
+  return false;
 }
 
 void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
@@ -2533,7 +2585,7 @@
 }
 
 void os::free_memory(char *addr, size_t bytes) {
-  ::madvise(addr, bytes, MADV_DONTNEED);
+  commit_memory(addr, bytes, false);
 }
 
 void os::numa_make_global(char *addr, size_t bytes) {
@@ -2577,6 +2629,31 @@
   return end;
 }
 
+
+int os::Linux::sched_getcpu_syscall(void) {
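+  // Fallback for old glibc versions without sched_getcpu(): issue the
+  // getcpu syscall directly (IA32) or go through the vsyscall page (AMD64).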
+  unsigned int cpu;
+  int retval = -1;
+
+#if defined(IA32)
+# ifndef SYS_getcpu
+# define SYS_getcpu 318
+# endif
+  retval = syscall(SYS_getcpu, &cpu, NULL, NULL);
+#elif defined(AMD64)
+// Unfortunately we have to bring all these macros here from vsyscall.h
+// to be able to compile on old linuxes.
+# define __NR_vgetcpu 2
+# define VSYSCALL_START (-10UL << 20)
+# define VSYSCALL_SIZE 1024
+# define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
+  typedef long (*vgetcpu_t)(unsigned int *cpu, unsigned int *node, unsigned long *tcache);
+  vgetcpu_t vgetcpu = (vgetcpu_t)VSYSCALL_ADDR(__NR_vgetcpu);
+  retval = vgetcpu(&cpu, NULL, NULL);
+#endif
+
+  return (retval == -1) ? retval : cpu;
+}
+
 // Something to do with the numa-aware allocator needs these symbols
 extern "C" JNIEXPORT void numa_warn(int number, char *where, ...) { }
 extern "C" JNIEXPORT void numa_error(char *where) { }
@@ -2600,6 +2677,10 @@
   set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
                                   dlsym(RTLD_DEFAULT, "sched_getcpu")));
 
+  // If it's not, try a direct syscall.
+  if (sched_getcpu() == -1)
+    set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t, (void*)&sched_getcpu_syscall));
+
   if (sched_getcpu() != -1) { // Does it work?
     void *handle = dlopen("libnuma.so.1", RTLD_LAZY);
     if (handle != NULL) {
@@ -3090,6 +3171,10 @@
      return NULL;
   }
 
+  if ((addr != NULL) && UseNUMAInterleaving) {
+    numa_make_global(addr, bytes);
+  }
+
   return addr;
 }
 
@@ -4272,7 +4357,16 @@
 }
 
 // this is called at the end of vm_initialization
-void os::init_3(void) { }
+void os::init_3(void)
+{
+#ifdef JAVASE_EMBEDDED
+  // Start the MemNotifyThread
+  if (LowMemoryProtection) {
+    MemNotifyThread::start();
+  }
+  return;
+#endif
+}
 
 // Mark the polling page as unreadable
 void os::make_polling_page_unreadable(void) {
@@ -5395,3 +5489,78 @@
     return true;
 }
 
+
+#ifdef JAVASE_EMBEDDED
+//
+// A thread to watch the '/dev/mem_notify' device, which will tell us when the OS is running low on memory.
+//
+MemNotifyThread* MemNotifyThread::_memnotify_thread = NULL;
+
+// ctor
+//
+MemNotifyThread::MemNotifyThread(int fd): Thread() {
+  assert(memnotify_thread() == NULL, "we can only allocate one MemNotifyThread");
+  _fd = fd;
+
+  if (os::create_thread(this, os::os_thread)) {
+    _memnotify_thread = this;
+    os::set_priority(this, NearMaxPriority);
+    os::start_thread(this);
+  }
+}
+
+// Where all the work gets done
+//
+void MemNotifyThread::run() {
+  assert(this == memnotify_thread(), "expected the singleton MemNotifyThread");
+
+  // Set up the select arguments
+  fd_set rfds;
+  if (_fd != -1) {
+    FD_ZERO(&rfds);
+    FD_SET(_fd, &rfds);
+  }
+
+  // Now wait for the mem_notify device to wake up
+  while (1) {
+    // Wait for the mem_notify device to signal us..
+    int rc = select(_fd+1, _fd != -1 ? &rfds : NULL, NULL, NULL, NULL);
+    if (rc == -1) {
+      perror("select!\n");
+      break;
+    } else if (rc) {
+      //ssize_t free_before = os::available_memory();
+      //tty->print ("Notified: Free: %dK \n",os::available_memory()/1024);
+
+      // The kernel is telling us there is not much memory left...
+      // try to do something about that
+
+      // If we are not already in a GC, try one.
+      if (!Universe::heap()->is_gc_active()) {
+        Universe::heap()->collect(GCCause::_allocation_failure);
+
+        //ssize_t free_after = os::available_memory();
+        //tty->print ("Post-Notify: Free: %dK\n",free_after/1024);
+        //tty->print ("GC freed: %dK\n", (free_after - free_before)/1024);
+      }
+      // We might want to do something like the following if we find the GC's are not helping...
+      // Universe::heap()->size_policy()->set_gc_time_limit_exceeded(true);
+    }
+  }
+}
+
+//
+// See if the /dev/mem_notify device exists, and if so, start a thread to monitor it.
+//
+void MemNotifyThread::start() {
+  int    fd;
+  fd = open ("/dev/mem_notify", O_RDONLY, 0);
+  if (fd < 0) {
+      return;
+  }
+
+  if (memnotify_thread() == NULL) {
+    new MemNotifyThread(fd);
+  }
+}
+#endif // JAVASE_EMBEDDED
--- a/src/os/linux/vm/os_linux.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/os/linux/vm/os_linux.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -263,6 +263,7 @@
   static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; }
   static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; }
   static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
+  static int sched_getcpu_syscall(void);
 public:
   static int sched_getcpu()  { return _sched_getcpu != NULL ? _sched_getcpu() : -1; }
   static int numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen) {
--- a/src/os/solaris/vm/os_solaris.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/os/solaris/vm/os_solaris.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -2317,6 +2317,10 @@
   return status;
 }
 
+void os::pd_print_cpu_info(outputStream* st) {
+  // Nothing to do for now.
+}
+
 void os::print_memory_info(outputStream* st) {
   st->print("Memory:");
   st->print(" %dk page", os::vm_page_size()>>10);
@@ -2773,8 +2777,14 @@
 bool os::commit_memory(char* addr, size_t bytes, bool exec) {
   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
   size_t size = bytes;
-  return
-     NULL != Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
+  char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
+  if (res != NULL) {
+    if (UseNUMAInterleaving) {
+      numa_make_global(addr, bytes);
+    }
+    return true;
+  }
+  return false;
 }
 
 bool os::commit_memory(char* addr, size_t bytes, size_t alignment_hint,
@@ -3248,7 +3258,6 @@
 //                                 supported Solaris versions, this combination
 //                                 is equivalent to +UseISM -UseMPSS.
 
-typedef int (*getpagesizes_func_type) (size_t[], int);
 static size_t _large_page_size = 0;
 
 bool os::Solaris::ism_sanity_check(bool warn, size_t * page_size) {
@@ -3280,23 +3289,29 @@
 }
 
 bool os::Solaris::mpss_sanity_check(bool warn, size_t * page_size) {
-  getpagesizes_func_type getpagesizes_func =
-    CAST_TO_FN_PTR(getpagesizes_func_type, dlsym(RTLD_DEFAULT, "getpagesizes"));
-  if (getpagesizes_func == NULL) {
-    if (warn) {
-      warning("MPSS is not supported by the operating system.");
-    }
-    return false;
-  }
-
   const unsigned int usable_count = VM_Version::page_size_count();
   if (usable_count == 1) {
     return false;
   }
 
+  // Find the right getpagesizes interface.  When Solaris 11 is the minimum
+  // build platform, getpagesizes() (without the '2') can be called directly.
+  typedef int (*gps_t)(size_t[], int);
+  gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
+  if (gps_func == NULL) {
+    gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
+    if (gps_func == NULL) {
+      if (warn) {
+        warning("MPSS is not supported by the operating system.");
+      }
+      return false;
+    }
+  }
+
   // Fill the array of page sizes.
-  int n = getpagesizes_func(_page_sizes, page_sizes_max);
+  int n = (*gps_func)(_page_sizes, page_sizes_max);
   assert(n > 0, "Solaris bug?");
+
   if (n == page_sizes_max) {
     // Add a sentinel value (necessary only if the array was completely filled
     // since it is static (zeroed at initialization)).
@@ -3304,6 +3319,7 @@
     DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
   }
   assert(_page_sizes[n] == 0, "missing sentinel");
+  trace_page_sizes("available page sizes", _page_sizes, n);
 
   if (n == 1) return false;     // Only one page size available.
 
@@ -3333,6 +3349,7 @@
   }
   *page_size = _page_sizes[0];
 
+  trace_page_sizes("usable page sizes", _page_sizes, end + 1);
   return true;
 }
 
@@ -3378,12 +3395,11 @@
   return true;
 }
 
-char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) {
+char* os::reserve_memory_special(size_t size, char* addr, bool exec) {
   // "exec" is passed in but not used.  Creating the shared image for
   // the code cache doesn't have an SHM_X executable permission to check.
   assert(UseLargePages && UseISM, "only for ISM large pages");
 
-  size_t size = bytes;
   char* retAddr = NULL;
   int shmid;
   key_t ismKey;
@@ -3425,7 +3441,9 @@
     }
     return NULL;
   }
-
+  if ((retAddr != NULL) && UseNUMAInterleaving) {
+    numa_make_global(retAddr, size);
+  }
   return retAddr;
 }
 
--- a/src/os/windows/vm/os_windows.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/os/windows/vm/os_windows.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1648,6 +1648,10 @@
   st->cr();
 }
 
+void os::pd_print_cpu_info(outputStream* st) {
+  // Nothing to do for now.
+}
+
 void os::print_memory_info(outputStream* st) {
   st->print("Memory:");
   st->print(" %dk page", os::vm_page_size()>>10);
@@ -2610,6 +2614,57 @@
 static HANDLE    _hProcess;
 static HANDLE    _hToken;
 
+// Container for NUMA node list info
+class NUMANodeListHolder {
+private:
+  int *_numa_used_node_list;  // allocated below
+  int _numa_used_node_count;
+
+  void free_node_list() {
+    if (_numa_used_node_list != NULL) {
+      FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
+    }
+  }
+
+public:
+  NUMANodeListHolder() {
+    _numa_used_node_count = 0;
+    _numa_used_node_list = NULL;
+    // do rest of initialization in build routine (after function pointers are set up)
+  }
+
+  ~NUMANodeListHolder() {
+    free_node_list();
+  }
+
+  bool build() {
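+    // Intersect the process affinity mask with each node's processor mask
+    // to collect the NUMA nodes this process can actually run on.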
+    DWORD_PTR proc_aff_mask;
+    DWORD_PTR sys_aff_mask;
+    if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
+    ULONG highest_node_number;
+    if (!os::Kernel32Dll::GetNumaHighestNodeNumber(&highest_node_number)) return false;
+    free_node_list();
+    _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1);
+    for (unsigned int i = 0; i <= highest_node_number; i++) {
+      ULONGLONG proc_mask_numa_node;
+      if (!os::Kernel32Dll::GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
+      if ((proc_aff_mask & proc_mask_numa_node)!=0) {
+        _numa_used_node_list[_numa_used_node_count++] = i;
+      }
+    }
+    return (_numa_used_node_count > 1);
+  }
+
+  int get_count() {return _numa_used_node_count;}
+  int get_node_list_entry(int n) {
+    // for indexes out of range, returns -1
+    return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
+  }
+
+} numa_node_list_holder;
+
+
+
 static size_t _large_page_size = 0;
 
 static bool resolve_functions_for_large_page_init() {
@@ -2649,6 +2704,153 @@
   _hToken = NULL;
 }
 
+static bool numa_interleaving_init() {
+  bool success = false;
+  bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
+
+  // print a warning if UseNUMAInterleaving flag is specified on command line
+  bool warn_on_failure = use_numa_interleaving_specified;
+# define WARN(msg) if (warn_on_failure) { warning(msg); }
+
+  // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
+  size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
+  NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);
+
+  if (os::Kernel32Dll::NumaCallsAvailable()) {
+    if (numa_node_list_holder.build()) {
+      if (PrintMiscellaneous && Verbose) {
+        tty->print("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
+        for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
+          tty->print("%d ", numa_node_list_holder.get_node_list_entry(i));
+        }
+        tty->print("\n");
+      }
+      success = true;
+    } else {
+      WARN("Process does not cover multiple NUMA nodes.");
+    }
+  } else {
+    WARN("NUMA Interleaving is not supported by the operating system.");
+  }
+  if (!success) {
+    if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
+  }
+  return success;
+#undef WARN
+}
+
+// This routine is used whenever we need to reserve a contiguous VA range
+// but must make separate VirtualAlloc calls for each piece of the range.
+// Reasons for doing this:
+//  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
+//  * UseNUMAInterleaving requires a separate node for each piece
+static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags, DWORD prot,
+                                         bool should_inject_error=false) {
+  char * p_buf;
+  // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
+  size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
+  size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
+
+  // First reserve enough address space in advance, since we want to be
+  // able to break a single contiguous virtual address range into multiple
+  // large page commits, but WS2003 does not allow reserving large page space.
+  // So we just use 4K pages for the reserve; this gives us a legal contiguous
+  // address space.  Then we deallocate that reservation and re-allocate
+  // using large pages.
+  const size_t size_of_reserve = bytes + chunk_size;
+  if (bytes > size_of_reserve) {
+    // Overflowed.
+    return NULL;
+  }
+  p_buf = (char *) VirtualAlloc(addr,
+                                size_of_reserve,  // size of Reserve
+                                MEM_RESERVE,
+                                PAGE_READWRITE);
+  // If reservation failed, return NULL
+  if (p_buf == NULL) return NULL;
+
+  os::release_memory(p_buf, bytes + chunk_size);
+
+  // We still need to round up to a page boundary (in case we are using large pages),
+  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with the page size);
+  // instead we handle this in the bytes_to_rq computation below.
+  p_buf = (char *) align_size_up((size_t)p_buf, page_size);
+
+  // now go through and allocate one chunk at a time until all bytes are
+  // allocated
+  size_t  bytes_remaining = bytes;
+  // An overflow of align_size_up() would have been caught above
+  // in the calculation of size_of_reserve.
+  char * next_alloc_addr = p_buf;
+  HANDLE hProc = GetCurrentProcess();
+
+#ifdef ASSERT
+  // Variable for the failure injection
+  long ran_num = os::random();
+  size_t fail_after = ran_num % bytes;
+#endif
+
+  int count=0;
+  while (bytes_remaining) {
+    // select bytes_to_rq to get to the next chunk_size boundary
+
+    size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
+    // Note allocate and commit
+    char * p_new;
+
+#ifdef ASSERT
+    bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
+#else
+    const bool inject_error_now = false;
+#endif
+
+    if (inject_error_now) {
+      p_new = NULL;
+    } else {
+      if (!UseNUMAInterleaving) {
+        p_new = (char *) VirtualAlloc(next_alloc_addr,
+                                      bytes_to_rq,
+                                      flags,
+                                      prot);
+      } else {
+        // get the next node to use from the used_node_list
+        assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
+        DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
+        p_new = (char *)os::Kernel32Dll::VirtualAllocExNuma(hProc,
+                                                            next_alloc_addr,
+                                                            bytes_to_rq,
+                                                            flags,
+                                                            prot,
+                                                            node);
+      }
+    }
+
+    if (p_new == NULL) {
+      // Free any allocated pages
+      if (next_alloc_addr > p_buf) {
+        // Some memory was committed so release it.
+        size_t bytes_to_release = bytes - bytes_remaining;
+        os::release_memory(p_buf, bytes_to_release);
+      }
+#ifdef ASSERT
+      if (should_inject_error) {
+        if (TracePageSizes && Verbose) {
+          tty->print_cr("Reserving pages individually failed.");
+        }
+      }
+#endif
+      return NULL;
+    }
+    bytes_remaining -= bytes_to_rq;
+    next_alloc_addr += bytes_to_rq;
+    count++;
+  }
+  // made it this far, success
+  return p_buf;
+}
+
+
+
 void os::large_page_init() {
   if (!UseLargePages) return;
 
@@ -2718,9 +2920,30 @@
   assert((size_t)addr % os::vm_allocation_granularity() == 0,
          "reserve alignment");
   assert(bytes % os::vm_allocation_granularity() == 0, "reserve block size");
-  char* res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
+  char* res;
+  // note that if UseLargePages is on, all the areas that require interleaving
+  // will go thru reserve_memory_special rather than thru here.
+  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
+  if (!use_individual) {
+    res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
+  } else {
+    elapsedTimer reserveTimer;
+    if( Verbose && PrintMiscellaneous ) reserveTimer.start();
+    // in numa interleaving, we have to allocate pages individually
+    // (well really chunks of NUMAInterleaveGranularity size)
+    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
+    if (res == NULL) {
+      warning("NUMA page allocation failed");
+    }
+    if( Verbose && PrintMiscellaneous ) {
+      reserveTimer.stop();
+      tty->print_cr("reserve_memory of %Ix bytes took %ld ms (%ld ticks)", bytes,
+                    reserveTimer.milliseconds(), reserveTimer.ticks());
+    }
+  }
   assert(res == NULL || addr == NULL || addr == res,
          "Unexpected address from reserve.");
+
   return res;
 }
 
@@ -2750,92 +2973,27 @@
 char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) {
 
   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
-
-  if (UseLargePagesIndividualAllocation) {
+  const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
+
+  // with large pages, there are two cases where we need to use Individual Allocation
+  // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
+  // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
+  if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
     if (TracePageSizes && Verbose) {
        tty->print_cr("Reserving large pages individually.");
     }
-    char * p_buf;
-    // first reserve enough address space in advance since we want to be
-    // able to break a single contiguous virtual address range into multiple
-    // large page commits but WS2003 does not allow reserving large page space
-    // so we just use 4K pages for reserve, this gives us a legal contiguous
-    // address space. then we will deallocate that reservation, and re alloc
-    // using large pages
-    const size_t size_of_reserve = bytes + _large_page_size;
-    if (bytes > size_of_reserve) {
-      // Overflowed.
-      warning("Individually allocated large pages failed, "
-        "use -XX:-UseLargePagesIndividualAllocation to turn off");
+    char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
+    if (p_buf == NULL) {
+      // give an appropriate warning message
+      if (UseNUMAInterleaving) {
+        warning("NUMA large page allocation failed, UseLargePages flag ignored");
+      }
+      if (UseLargePagesIndividualAllocation) {
+        warning("Individually allocated large pages failed, "
+                "use -XX:-UseLargePagesIndividualAllocation to turn off");
+      }
       return NULL;
     }
-    p_buf = (char *) VirtualAlloc(addr,
-                                 size_of_reserve,  // size of Reserve
-                                 MEM_RESERVE,
-                                 PAGE_READWRITE);
-    // If reservation failed, return NULL
-    if (p_buf == NULL) return NULL;
-
-    release_memory(p_buf, bytes + _large_page_size);
-    // round up to page boundary.  If the size_of_reserve did not
-    // overflow and the reservation did not fail, this align up
-    // should not overflow.
-    p_buf = (char *) align_size_up((size_t)p_buf, _large_page_size);
-
-    // now go through and allocate one page at a time until all bytes are
-    // allocated
-    size_t  bytes_remaining = align_size_up(bytes, _large_page_size);
-    // An overflow of align_size_up() would have been caught above
-    // in the calculation of size_of_reserve.
-    char * next_alloc_addr = p_buf;
-
-#ifdef ASSERT
-    // Variable for the failure injection
-    long ran_num = os::random();
-    size_t fail_after = ran_num % bytes;
-#endif
-
-    while (bytes_remaining) {
-      size_t bytes_to_rq = MIN2(bytes_remaining, _large_page_size);
-      // Note allocate and commit
-      char * p_new;
-
-#ifdef ASSERT
-      bool inject_error = LargePagesIndividualAllocationInjectError &&
-          (bytes_remaining <= fail_after);
-#else
-      const bool inject_error = false;
-#endif
-
-      if (inject_error) {
-        p_new = NULL;
-      } else {
-        p_new = (char *) VirtualAlloc(next_alloc_addr,
-                                    bytes_to_rq,
-                                    MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
-                                    prot);
-      }
-
-      if (p_new == NULL) {
-        // Free any allocated pages
-        if (next_alloc_addr > p_buf) {
-          // Some memory was committed so release it.
-          size_t bytes_to_release = bytes - bytes_remaining;
-          release_memory(p_buf, bytes_to_release);
-        }
-#ifdef ASSERT
-        if (UseLargePagesIndividualAllocation &&
-            LargePagesIndividualAllocationInjectError) {
-          if (TracePageSizes && Verbose) {
-             tty->print_cr("Reserving large pages individually failed.");
-          }
-        }
-#endif
-        return NULL;
-      }
-      bytes_remaining -= bytes_to_rq;
-      next_alloc_addr += bytes_to_rq;
-    }
 
     return p_buf;
 
@@ -2863,14 +3021,43 @@
   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
   // Don't attempt to print anything if the OS call fails. We're
   // probably low on resources, so the print itself may cause crashes.
-  bool result = VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) != 0;
-  if (result != NULL && exec) {
-    DWORD oldprot;
-    // Windows doc says to use VirtualProtect to get execute permissions
-    return VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot) != 0;
+
+  // unless we have NUMAInterleaving enabled, the range of a commit
+  // is always within a reserve covered by a single VirtualAlloc
+  // in that case we can just do a single commit for the requested size
+  if (!UseNUMAInterleaving) {
+    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) return false;
+    if (exec) {
+      DWORD oldprot;
+      // Windows doc says to use VirtualProtect to get execute permissions
+      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) return false;
+    }
+    return true;
   } else {
-    return result;
-  }
+
+    // when NUMAInterleaving is enabled, the commit might cover a range that
+    // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
+    // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
+    // returns represents the number of bytes that can be committed in one step.
+    size_t bytes_remaining = bytes;
+    char * next_alloc_addr = addr;
+    while (bytes_remaining > 0) {
+      MEMORY_BASIC_INFORMATION alloc_info;
+      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
+      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
+      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT, PAGE_READWRITE) == NULL)
+        return false;
+      if (exec) {
+        DWORD oldprot;
+        if (!VirtualProtect(next_alloc_addr, bytes_to_rq, PAGE_EXECUTE_READWRITE, &oldprot))
+          return false;
+      }
+      bytes_remaining -= bytes_to_rq;
+      next_alloc_addr += bytes_to_rq;
+    }
+  }
+  // if we made it this far, return true
+  return true;
 }
 
 bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
@@ -2944,14 +3131,21 @@
 void os::numa_make_global(char *addr, size_t bytes)    { }
 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
 bool os::numa_topology_changed()                       { return false; }
-size_t os::numa_get_groups_num()                       { return 1; }
+size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
 int os::numa_get_group_id()                            { return 0; }
 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
-  if (size > 0) {
+  if (numa_node_list_holder.get_count() == 0 && size > 0) {
+    // Provide an answer for UMA systems
     ids[0] = 0;
     return 1;
-  }
-  return 0;
+  } else {
+    // check for size bigger than actual groups_num
+    size = MIN2(size, numa_get_groups_num());
+    for (int i = 0; i < (int)size; i++) {
+      ids[i] = numa_node_list_holder.get_node_list_entry(i);
+    }
+    return size;
+  }
 }
 
 bool os::get_page_info(char *start, page_info* info) {
@@ -3476,7 +3670,7 @@
     if(Verbose && PrintMiscellaneous)
       tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
 #endif
-}
+  }
 
   os::large_page_init();
 
@@ -3580,7 +3774,13 @@
   prio_init();
 
   if (UseNUMA && !ForceNUMA) {
-    UseNUMA = false; // Currently unsupported.
+    UseNUMA = false; // We don't fully support this yet
+  }
+
+  if (UseNUMAInterleaving) {
+    // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
+    bool success = numa_interleaving_init();
+    if (!success) UseNUMAInterleaving = false;
   }
 
   return JNI_OK;
@@ -4754,7 +4954,14 @@
 
 // Kernel32 API
 typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void);
+typedef LPVOID (WINAPI *VirtualAllocExNuma_Fn) (HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD);
+typedef BOOL (WINAPI *GetNumaHighestNodeNumber_Fn) (PULONG);
+typedef BOOL (WINAPI *GetNumaNodeProcessorMask_Fn) (UCHAR, PULONGLONG);
+
 GetLargePageMinimum_Fn      os::Kernel32Dll::_GetLargePageMinimum = NULL;
+VirtualAllocExNuma_Fn       os::Kernel32Dll::_VirtualAllocExNuma = NULL;
+GetNumaHighestNodeNumber_Fn os::Kernel32Dll::_GetNumaHighestNodeNumber = NULL;
+GetNumaNodeProcessorMask_Fn os::Kernel32Dll::_GetNumaNodeProcessorMask = NULL;
 BOOL                        os::Kernel32Dll::initialized = FALSE;
 SIZE_T os::Kernel32Dll::GetLargePageMinimum() {
   assert(initialized && _GetLargePageMinimum != NULL,
@@ -4769,16 +4976,53 @@
   return _GetLargePageMinimum != NULL;
 }
 
+BOOL os::Kernel32Dll::NumaCallsAvailable() {
+  if (!initialized) {
+    initialize();
+  }
+  return _VirtualAllocExNuma != NULL;
+}
+
+LPVOID os::Kernel32Dll::VirtualAllocExNuma(HANDLE hProc, LPVOID addr, SIZE_T bytes, DWORD flags, DWORD prot, DWORD node) {
+  assert(initialized && _VirtualAllocExNuma != NULL,
+    "NUMACallsAvailable() not yet called");
+
+  return _VirtualAllocExNuma(hProc, addr, bytes, flags, prot, node);
+}
+
+BOOL os::Kernel32Dll::GetNumaHighestNodeNumber(PULONG ptr_highest_node_number) {
+  assert(initialized && _GetNumaHighestNodeNumber != NULL,
+    "NUMACallsAvailable() not yet called");
+
+  return _GetNumaHighestNodeNumber(ptr_highest_node_number);
+}
+
+BOOL os::Kernel32Dll::GetNumaNodeProcessorMask(UCHAR node, PULONGLONG proc_mask) {
+  assert(initialized && _GetNumaNodeProcessorMask != NULL,
+    "NUMACallsAvailable() not yet called");
+
+  return _GetNumaNodeProcessorMask(node, proc_mask);
+}
+
+
+void os::Kernel32Dll::initializeCommon() {
+  if (!initialized) {
+    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
+    assert(handle != NULL, "Just check");
+    _GetLargePageMinimum = (GetLargePageMinimum_Fn)::GetProcAddress(handle, "GetLargePageMinimum");
+    _VirtualAllocExNuma = (VirtualAllocExNuma_Fn)::GetProcAddress(handle, "VirtualAllocExNuma");
+    _GetNumaHighestNodeNumber = (GetNumaHighestNodeNumber_Fn)::GetProcAddress(handle, "GetNumaHighestNodeNumber");
+    _GetNumaNodeProcessorMask = (GetNumaNodeProcessorMask_Fn)::GetProcAddress(handle, "GetNumaNodeProcessorMask");
+    initialized = TRUE;
+  }
+}
+
+
 
 #ifndef JDK6_OR_EARLIER
 
 void os::Kernel32Dll::initialize() {
-  if (!initialized) {
-    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
-    assert(handle != NULL, "Just check");
-    _GetLargePageMinimum = (GetLargePageMinimum_Fn)::GetProcAddress(handle, "GetLargePageMinimum");
-    initialized = TRUE;
-  }
+  initializeCommon();
 }
 
 
@@ -4883,18 +5127,19 @@
 Module32Next_Fn             os::Kernel32Dll::_Module32Next = NULL;
 GetNativeSystemInfo_Fn      os::Kernel32Dll::_GetNativeSystemInfo = NULL;
 
+
 void os::Kernel32Dll::initialize() {
   if (!initialized) {
     HMODULE handle = ::GetModuleHandle("Kernel32.dll");
     assert(handle != NULL, "Just check");
 
     _SwitchToThread = (SwitchToThread_Fn)::GetProcAddress(handle, "SwitchToThread");
-    _GetLargePageMinimum = (GetLargePageMinimum_Fn)::GetProcAddress(handle, "GetLargePageMinimum");
     _CreateToolhelp32Snapshot = (CreateToolhelp32Snapshot_Fn)
       ::GetProcAddress(handle, "CreateToolhelp32Snapshot");
     _Module32First = (Module32First_Fn)::GetProcAddress(handle, "Module32First");
     _Module32Next = (Module32Next_Fn)::GetProcAddress(handle, "Module32Next");
     _GetNativeSystemInfo = (GetNativeSystemInfo_Fn)::GetProcAddress(handle, "GetNativeSystemInfo");
+    initializeCommon();  // resolve the functions that always need resolving
 
     initialized = TRUE;
   }
@@ -4960,6 +5205,8 @@
   _GetNativeSystemInfo(lpSystemInfo);
 }
 
+
+
 // PSAPI API
 
 
@@ -5075,7 +5322,7 @@
       _OpenProcessToken = (OpenProcessToken_Fn)::GetProcAddress(handle,
         "OpenProcessToken");
       _LookupPrivilegeValue = (LookupPrivilegeValue_Fn)::GetProcAddress(handle,
-        "LookupPrivilegeValue");
+        "LookupPrivilegeValueA");
     }
     initialized = TRUE;
   }
--- a/src/os/windows/vm/os_windows.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/os/windows/vm/os_windows.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -173,13 +173,25 @@
   static BOOL GetNativeSystemInfoAvailable();
   static void GetNativeSystemInfo(LPSYSTEM_INFO);
 
+  // NUMA calls
+  static BOOL NumaCallsAvailable();
+  static LPVOID VirtualAllocExNuma(HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD);
+  static BOOL GetNumaHighestNodeNumber(PULONG);
+  static BOOL GetNumaNodeProcessorMask(UCHAR, PULONGLONG);
+
 private:
   // GetLargePageMinimum available on Windows Vista/Windows Server 2003
   // and later
+  // NUMA calls available Windows Vista/WS2008 and later
+
   static SIZE_T (WINAPI *_GetLargePageMinimum)(void);
+  static LPVOID (WINAPI *_VirtualAllocExNuma) (HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD);
+  static BOOL (WINAPI *_GetNumaHighestNodeNumber) (PULONG);
+  static BOOL (WINAPI *_GetNumaNodeProcessorMask) (UCHAR, PULONGLONG);
   static BOOL initialized;
 
   static void initialize();
+  static void initializeCommon();
 
 #ifdef JDK6_OR_EARLIER
 private:
--- a/src/os_cpu/linux_x86/vm/assembler_linux_x86.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/os_cpu/linux_x86/vm/assembler_linux_x86.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -33,6 +33,28 @@
   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
 }
 
+#ifdef MINIMIZE_RAM_USAGE
+
+void MacroAssembler::get_thread(Register thread) {
+  // call pthread_getspecific
+  // void * pthread_getspecific(pthread_key_t key);
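+  // rax, rcx and rdx are caller-saved in the 32-bit C calling convention,
+  // so preserve them around the call (rax only if it is not the result register).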
+  if (thread != rax) push(rax);
+  push(rcx);
+  push(rdx);
+
+  push(ThreadLocalStorage::thread_index());
+  call(RuntimeAddress(CAST_FROM_FN_PTR(address, pthread_getspecific)));
+  increment(rsp, wordSize);
+
+  pop(rdx);
+  pop(rcx);
+  if (thread != rax) {
+    mov(thread, rax);
+    pop(rax);
+  }
+}
+
+#else
 void MacroAssembler::get_thread(Register thread) {
   movl(thread, rsp);
   shrl(thread, PAGE_SHIFT);
@@ -43,6 +65,7 @@
 
   movptr(thread, tls);
 }
+#endif // MINIMIZE_RAM_USAGE
 #else
 void MacroAssembler::int3() {
   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
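
The MINIMIZE_RAM_USAGE variant of get_thread() above trades the page-indexed _sp_map lookup for a plain runtime call to pthread_getspecific. A small self-contained sketch of that POSIX lookup is below; the key and Thread struct are stand-ins for illustration (the VM itself keys on ThreadLocalStorage::thread_index()):

  #include <pthread.h>
  #include <cstdio>

  struct Thread { int id; };          // stand-in for the VM's Thread object

  static pthread_key_t thread_key;    // illustrative; the VM uses thread_index()

  static void* worker(void*) {
    Thread t = { 42 };
    pthread_setspecific(thread_key, &t);                          // analogous to pd_set_thread()
    Thread* current = (Thread*)pthread_getspecific(thread_key);   // what get_thread() now calls into
    std::printf("current thread id = %d\n", current->id);
    return NULL;
  }

  int main() {
    pthread_key_create(&thread_key, NULL);
    pthread_t tid;
    pthread_create(&tid, NULL, worker, NULL);
    pthread_join(tid, NULL);
    pthread_key_delete(&thread_key);
    return 0;
  }

The generated assembly in the patch simply saves the caller-clobbered registers, pushes the key, and makes this call, which is slower per lookup but needs no per-page mapping table.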
--- a/src/os_cpu/linux_x86/vm/linux_x86_32.ad	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/os_cpu/linux_x86/vm/linux_x86_32.ad	Thu Dec 22 15:46:11 2011 +0000
@@ -154,7 +154,7 @@
 
 
 uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
-  return 5;
+  return MachNode::size(ra_);
 }
 
 %}
--- a/src/os_cpu/linux_x86/vm/linux_x86_64.ad	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/os_cpu/linux_x86/vm/linux_x86_64.ad	Thu Dec 22 15:46:11 2011 +0000
@@ -167,7 +167,8 @@
 }
 
 uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
-  return 5;
+  // the call target may be far away, requiring a load and call through a register
+  return MachNode::size(ra_);
 }
 
 %}
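
The size change matters on 64-bit: a breakpoint call emitted as a direct call rel32 is 5 bytes, but when os::breakpoint lies outside the +/-2 GB reach of rel32 the assembler has to materialize the address and call through a register, e.g. a 10-byte movabs plus a 3-byte indirect call, 13 bytes in total (assuming the usual x86-64 encodings). Hard-coding 5 would then under-report the node's size, so deferring to MachNode::size(ra_) measures the actually emitted length instead.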
--- a/src/os_cpu/linux_x86/vm/threadLS_linux_x86.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/os_cpu/linux_x86/vm/threadLS_linux_x86.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -52,25 +52,20 @@
 // MADV_DONTNEED on Linux keeps the virtual memory mapping, but zaps the
 // physical memory page (i.e. similar to MADV_FREE on Solaris).
 
-#ifndef AMD64
+#if !defined(AMD64) && !defined(MINIMIZE_RAM_USAGE)
 Thread* ThreadLocalStorage::_sp_map[1UL << (SP_BITLENGTH - PAGE_SHIFT)];
-#endif // !AMD64
 
 void ThreadLocalStorage::generate_code_for_get_thread() {
     // nothing we can do here for user-level thread
 }
 
 void ThreadLocalStorage::pd_init() {
-#ifndef AMD64
   assert(align_size_down(os::vm_page_size(), PAGE_SIZE) == os::vm_page_size(),
          "page size must be multiple of PAGE_SIZE");
-#endif // !AMD64
 }
 
 void ThreadLocalStorage::pd_set_thread(Thread* thread) {
   os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-
-#ifndef AMD64
   address stack_top = os::current_stack_base();
   size_t stack_size = os::current_stack_size();
 
@@ -88,5 +83,17 @@
            "thread exited without detaching from VM??");
     _sp_map[(uintptr_t)p >> PAGE_SHIFT] = thread;
   }
-#endif // !AMD64
+}
+#else
+
+void ThreadLocalStorage::generate_code_for_get_thread() {
+    // nothing we can do here for user-level thread
 }
+
+void ThreadLocalStorage::pd_init() {
+}
+
+void ThreadLocalStorage::pd_set_thread(Thread* thread) {
+  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
+}
+#endif // !AMD64 && !MINIMIZE_RAM_USAGE
--- a/src/os_cpu/linux_x86/vm/threadLS_linux_x86.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/os_cpu/linux_x86/vm/threadLS_linux_x86.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -27,28 +27,32 @@
 
   // Processor dependent parts of ThreadLocalStorage
 
-#ifndef AMD64
+#if !defined(AMD64) && !defined(MINIMIZE_RAM_USAGE)
+
   // map stack pointer to thread pointer - see notes in threadLS_linux_x86.cpp
   #define SP_BITLENGTH  32
   #define PAGE_SHIFT    12
   #define PAGE_SIZE     (1UL << PAGE_SHIFT)
   static Thread* _sp_map[1UL << (SP_BITLENGTH - PAGE_SHIFT)];
-#endif // !AMD64
 
 public:
 
-#ifndef AMD64
   static Thread** sp_map_addr() { return _sp_map; }
-#endif // !AMD64
 
   static Thread* thread() {
-#ifdef AMD64
-    return (Thread*) os::thread_local_storage_at(thread_index());
-#else
     uintptr_t sp;
     __asm__ volatile ("movl %%esp, %0" : "=r" (sp));
     return _sp_map[sp >> PAGE_SHIFT];
-#endif // AMD64
   }
 
+#else
+
+public:
+
+   static Thread* thread() {
+     return (Thread*) os::thread_local_storage_at(thread_index());
+   }
+
+#endif // AMD64 || MINIMIZE_RAM_USAGE
+
 #endif // OS_CPU_LINUX_X86_VM_THREADLS_LINUX_X86_HPP
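
The constants above also quantify the RAM cost that MINIMIZE_RAM_USAGE avoids: _sp_map holds 1UL << (SP_BITLENGTH - PAGE_SHIFT) = 1 << 20 = 1,048,576 entries, and with 4-byte Thread* pointers on 32-bit Linux that is a statically allocated table of roughly 4 MB. The pthread_getspecific path removes that table at the cost of a runtime call per get_thread().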
--- a/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -114,6 +114,11 @@
 #endif
     if (av & AV_SPARC_VIS3)         features |= vis3_instructions_m;
 
+#ifndef AV_SPARC_CBCOND
+#define AV_SPARC_CBCOND 0x10000000  /* compare and branch instrs supported */
+#endif
+    if (av & AV_SPARC_CBCOND)       features |= cbcond_instructions_m;
+
   } else {
     // getisax(2) failed, use the old legacy code.
 #ifndef PRODUCT
--- a/src/os_cpu/solaris_x86/vm/solaris_x86_32.ad	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/os_cpu/solaris_x86/vm/solaris_x86_32.ad	Thu Dec 22 15:46:11 2011 +0000
@@ -161,7 +161,7 @@
 
 
 uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
-  return 5;
+  return MachNode::size(ra_);
 }
 
 %}
--- a/src/os_cpu/solaris_x86/vm/solaris_x86_64.ad	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/os_cpu/solaris_x86/vm/solaris_x86_64.ad	Thu Dec 22 15:46:11 2011 +0000
@@ -180,7 +180,8 @@
 
 uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const
 {
-  return 5;
+  // the call target may be far away, requiring a load and call through a register
+  return MachNode::size(ra_);
 }
 
 %}
--- a/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java	Thu Dec 22 15:46:11 2011 +0000
@@ -482,7 +482,7 @@
                 "/export:JVM_GetThreadStateNames "+
                 "/export:JVM_GetThreadStateValues "+
                 "/export:JVM_InitAgentProperties");
-        addAttr(rv, "AdditionalDependencies", "kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;Wsock32.lib;winmm.lib");
+        addAttr(rv, "AdditionalDependencies", "kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;Wsock32.lib;winmm.lib;psapi.lib");
         addAttr(rv, "OutputFile", outDll);
         addAttr(rv, "SuppressStartupBanner", "true");
         addAttr(rv, "ModuleDefinitionFile", outDir+Util.sep+"vm.def");
--- a/src/share/tools/hsdis/README	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/tools/hsdis/README	Thu Dec 22 15:46:11 2011 +0000
@@ -75,8 +75,16 @@
 * Installing
 
 Products are named like build/$OS-$LIBARCH/hsdis-$LIBARCH.so.  You can
-install them on your LD_LIBRARY_PATH, or inside of your JRE next to
-$LIBARCH/libjvm.so.
+install them on your LD_LIBRARY_PATH, or inside of your JRE/JDK.  The
+search path in the JVM is:
+
+1. <home>/jre/lib/<arch>/<vm>/libhsdis-<arch>.so
+2. <home>/jre/lib/<arch>/<vm>/hsdis-<arch>.so
+3. <home>/jre/lib/<arch>/hsdis-<arch>.so
+4. hsdis-<arch>.so  (using LD_LIBRARY_PATH)
+
+Note that there's a bug in hotspot versions prior to hs22 that causes
+steps 2 and 3 to fail when used with JDK7.
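
To make the search order above concrete, here is a tiny illustrative probe that tries the same four locations in sequence with dlopen. The <home>, <arch> and <vm> values are placeholders filled in by hand for the example; this is not the JVM's own loader code:

  #include <dlfcn.h>
  #include <cstdio>
  #include <string>

  int main() {
    // Placeholder values; the JVM substitutes its own <home>, <arch> and <vm>.
    std::string home = "/usr/lib/jvm/java-7-openjdk";
    std::string arch = "amd64";
    std::string vm   = "server";

    std::string candidates[4];
    candidates[0] = home + "/jre/lib/" + arch + "/" + vm + "/libhsdis-" + arch + ".so";
    candidates[1] = home + "/jre/lib/" + arch + "/" + vm + "/hsdis-" + arch + ".so";
    candidates[2] = home + "/jre/lib/" + arch + "/hsdis-" + arch + ".so";
    candidates[3] = "hsdis-" + arch + ".so";   // left to the LD_LIBRARY_PATH search

    for (int i = 0; i < 4; i++) {
      void* handle = dlopen(candidates[i].c_str(), RTLD_LAZY);
      if (handle != NULL) {
        std::printf("found %s\n", candidates[i].c_str());
        dlclose(handle);
        return 0;
      }
    }
    std::printf("no hsdis library found\n");
    return 1;
  }

Build it with -ldl; whether the fourth candidate is found depends on LD_LIBRARY_PATH, matching step 4 above.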
 
 Now test:
 
--- a/src/share/vm/adlc/adlparse.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/adlc/adlparse.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -126,9 +126,6 @@
   if (_globalNames[AttributeForm::_ins_cost] == NULL) {
     parse_err(SEMERR, "Did not declare 'ins_cost' attribute");
   }
-  if (_globalNames[AttributeForm::_ins_pc_relative] == NULL) {
-    parse_err(SEMERR, "Did not declare 'ins_pc_relative' attribute");
-  }
   if (_globalNames[AttributeForm::_op_cost] == NULL) {
     parse_err(SEMERR, "Did not declare 'op_cost' attribute");
   }
@@ -2812,6 +2809,13 @@
     params->add_entry(param);
   }
 
+  // Check for duplicate ins_encode sections after parsing the block
+  // so that parsing can continue and find any other errors.
+  if (inst._insencode != NULL) {
+    parse_err(SYNERR, "Multiple ins_encode sections defined\n");
+    return;
+  }
+
   // Set encode class of this instruction.
   inst._insencode = encrule;
 }
@@ -3044,6 +3048,13 @@
   next_char();                     // move past ';'
   skipws();                        // be friendly to oper_parse()
 
+  // Check for duplicate ins_encode sections after parsing the block
+  // so that parsing can continue and find any other errors.
+  if (inst._insencode != NULL) {
+    parse_err(SYNERR, "Multiple ins_encode sections defined\n");
+    return;
+  }
+
   // Debug Stuff
   if (_AD._adl_debug > 1) fprintf(stderr,"Instruction Encode: %s\n", ec_name);
 
--- a/src/share/vm/adlc/archDesc.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/adlc/archDesc.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -331,10 +331,18 @@
     // Find result type for match
     const char *result  = instr->reduce_result();
 
+    if ( instr->is_ideal_branch() && instr->label_position() == -1 ||
+        !instr->is_ideal_branch() && instr->label_position() != -1) {
+      syntax_err(instr->_linenum, "%s: Only branches to a label are supported\n", rootOp);
+    }
+
     Attribute *attr = instr->_attribs;
     while (attr != NULL) {
       if (strcmp(attr->_ident,"ins_short_branch") == 0 &&
           attr->int_val(*this) != 0) {
+        if (!instr->is_ideal_branch() || instr->label_position() == -1) {
+          syntax_err(instr->_linenum, "%s: Only short branch to a label is supported\n", rootOp);
+        }
         instr->set_short_branch(true);
       } else if (strcmp(attr->_ident,"ins_alignment") == 0 &&
           attr->int_val(*this) != 0) {
--- a/src/share/vm/adlc/formssel.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/adlc/formssel.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -291,15 +291,6 @@
 }
 
 
-// Return 'true' if this instruction matches an ideal 'Copy*' node
-bool InstructForm::is_ideal_unlock() const {
-  return _matrule ? _matrule->is_ideal_unlock() : false;
-}
-
-bool InstructForm::is_ideal_call_leaf() const {
-  return _matrule ? _matrule->is_ideal_call_leaf() : false;
-}
-
 // Return 'true' if this instruction matches an ideal 'If' node
 bool InstructForm::is_ideal_if() const {
   if( _matrule == NULL ) return false;
@@ -349,12 +340,11 @@
   return _matrule->is_ideal_jump();
 }
 
-// Return 'true' if instruction matches ideal 'If' | 'Goto' |
-//                    'CountedLoopEnd' | 'Jump'
+// Return 'true' if instruction matches ideal 'If' | 'Goto' | 'CountedLoopEnd'
 bool InstructForm::is_ideal_branch() const {
   if( _matrule == NULL ) return false;
 
-  return _matrule->is_ideal_if() || _matrule->is_ideal_goto() || _matrule->is_ideal_jump();
+  return _matrule->is_ideal_if() || _matrule->is_ideal_goto();
 }
 
 
@@ -392,7 +382,7 @@
 bool InstructForm::is_ideal_control() const {
   if ( ! _matrule)  return false;
 
-  return is_ideal_return() || is_ideal_branch() || is_ideal_halt();
+  return is_ideal_return() || is_ideal_branch() || _matrule->is_ideal_jump() || is_ideal_halt();
 }
 
 // Return 'true' if this instruction matches an ideal 'Call' node
@@ -633,6 +623,8 @@
 
   if( strcmp(_matrule->_opType,"MemBarRelease") == 0 ) return true;
   if( strcmp(_matrule->_opType,"MemBarAcquire") == 0 ) return true;
+  if( strcmp(_matrule->_opType,"MemBarReleaseLock") == 0 ) return true;
+  if( strcmp(_matrule->_opType,"MemBarAcquireLock") == 0 ) return true;
 
   return false;
 }
@@ -1094,6 +1086,9 @@
   else if (is_ideal_if()) {
     return "MachIfNode";
   }
+  else if (is_ideal_goto()) {
+    return "MachGotoNode";
+  }
   else if (is_ideal_fastlock()) {
     return "MachFastLockNode";
   }
@@ -1185,6 +1180,34 @@
       strcmp(reduce_result(), short_branch->reduce_result()) == 0 &&
       _matrule->equivalent(AD.globalNames(), short_branch->_matrule)) {
     // The instructions are equivalent.
+
+    // Now verify that both instructions have the same parameters and
+    // the same effects. Both branch forms should have the same inputs
+    // and resulting projections to correctly replace a long branch node
+    // with corresponding short branch node during code generation.
+
+    bool different = false;
+    if (short_branch->_components.count() != _components.count()) {
+       different = true;
+    } else if (_components.count() > 0) {
+      short_branch->_components.reset();
+      _components.reset();
+      Component *comp;
+      while ((comp = _components.iter()) != NULL) {
+        Component *short_comp = short_branch->_components.iter();
+        if (short_comp == NULL ||
+            short_comp->_type != comp->_type ||
+            short_comp->_usedef != comp->_usedef) {
+          different = true;
+          break;
+        }
+      }
+      if (short_branch->_components.iter() != NULL)
+        different = true;
+    }
+    if (different) {
+      globalAD->syntax_err(short_branch->_linenum, "Instruction %s and its short form %s have different parameters\n", _ident, short_branch->_ident);
+    }
     if (AD._short_branch_debug) {
       fprintf(stderr, "Instruction %s has short form %s\n", _ident, short_branch->_ident);
     }
@@ -2706,7 +2729,6 @@
 int         AttributeForm::_insId   = 0;           // start counter at 0
 int         AttributeForm::_opId    = 0;           // start counter at 0
 const char* AttributeForm::_ins_cost = "ins_cost"; // required name
-const char* AttributeForm::_ins_pc_relative = "ins_pc_relative";
 const char* AttributeForm::_op_cost  = "op_cost";  // required name
 
 AttributeForm::AttributeForm(char *attr, int type, char *attrdef)
@@ -3368,7 +3390,9 @@
     "ClearArray"
   };
   int cnt = sizeof(needs_ideal_memory_list)/sizeof(char*);
-  if( strcmp(_opType,"PrefetchRead")==0 || strcmp(_opType,"PrefetchWrite")==0 )
+  if( strcmp(_opType,"PrefetchRead")==0 ||
+      strcmp(_opType,"PrefetchWrite")==0 ||
+      strcmp(_opType,"PrefetchAllocation")==0 )
     return 1;
   if( _lChild ) {
     const char *opType = _lChild->_opType;
@@ -3623,7 +3647,27 @@
   assert( mNode2->_opType, "Must have _opType");
   const Form *form  = globals[_opType];
   const Form *form2 = globals[mNode2->_opType];
-  return (form == form2);
+  if( form != form2 ) {
+    return false;
+  }
+
+  // Check that their children also match
+  if (_lChild ) {
+    if( !_lChild->equivalent(globals, mNode2->_lChild) )
+      return false;
+  } else if (mNode2->_lChild) {
+    return false; // I have NULL left child, mNode2 has non-NULL left child.
+  }
+
+  if (_rChild ) {
+    if( !_rChild->equivalent(globals, mNode2->_rChild) )
+      return false;
+  } else if (mNode2->_rChild) {
+    return false; // I have NULL right child, mNode2 has non-NULL right child.
+  }
+
+  // We've made it through the gauntlet.
+  return true;
 }
 
 //-------------------------- has_commutative_op -------------------------------
@@ -3909,19 +3953,6 @@
   return 0;
 }
 
-bool MatchRule::is_ideal_unlock() const {
-  if( !_opType ) return false;
-  return !strcmp(_opType,"Unlock") || !strcmp(_opType,"FastUnlock");
-}
-
-
-bool MatchRule::is_ideal_call_leaf() const {
-  if( !_opType ) return false;
-  return !strcmp(_opType,"CallLeaf")     ||
-         !strcmp(_opType,"CallLeafNoFP");
-}
-
-
 bool MatchRule::is_ideal_if() const {
   if( !_opType ) return false;
   return
@@ -3941,6 +3972,8 @@
   return
     !strcmp(_opType,"MemBarAcquire"  ) ||
     !strcmp(_opType,"MemBarRelease"  ) ||
+    !strcmp(_opType,"MemBarAcquireLock") ||
+    !strcmp(_opType,"MemBarReleaseLock") ||
     !strcmp(_opType,"MemBarVolatile" ) ||
     !strcmp(_opType,"MemBarCPUOrder" ) ;
 }
--- a/src/share/vm/adlc/formssel.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/adlc/formssel.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -145,8 +145,6 @@
   virtual int         is_empty_encoding() const; // _size=0 and/or _insencode empty
   virtual int         is_tls_instruction() const; // tlsLoadP rule or ideal ThreadLocal
   virtual int         is_ideal_copy() const;    // node matches ideal 'Copy*'
-  virtual bool        is_ideal_unlock() const;  // node matches ideal 'Unlock'
-  virtual bool        is_ideal_call_leaf() const; // node matches ideal 'CallLeaf'
   virtual bool        is_ideal_if()   const;    // node matches ideal 'If'
   virtual bool        is_ideal_fastlock() const; // node matches 'FastLock'
   virtual bool        is_ideal_membar() const;  // node matches ideal 'MemBarXXX'
@@ -857,7 +855,6 @@
   int  type() { return id;}        // return this object's "id"
 
   static const char* _ins_cost;        // "ins_cost"
-  static const char* _ins_pc_relative; // "ins_pc_relative"
   static const char* _op_cost;         // "op_cost"
 
   void dump();                     // Debug printer
@@ -1002,8 +999,6 @@
   bool       is_chain_rule(FormDict &globals) const;
   int        is_ideal_copy() const;
   int        is_expensive() const;     // node matches ideal 'CosD'
-  bool       is_ideal_unlock() const;
-  bool       is_ideal_call_leaf() const;
   bool       is_ideal_if()   const;    // node matches ideal 'If'
   bool       is_ideal_fastlock() const; // node matches ideal 'FastLock'
   bool       is_ideal_jump()   const;  // node matches ideal 'Jump'
--- a/src/share/vm/adlc/output_c.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/adlc/output_c.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -3088,12 +3088,19 @@
     int label_position = instr->label_position();
     if( label_position != -1 ) {
       // Set the label
-      fprintf(fp,"void %sNode::label_set( Label& label, uint block_num ) {\n", instr->_ident);
+      fprintf(fp,"void %sNode::label_set( Label* label, uint block_num ) {\n", instr->_ident);
       fprintf(fp,"  labelOper* oper  = (labelOper*)(opnd_array(%d));\n",
               label_position );
-      fprintf(fp,"  oper->_label     = &label;\n");
+      fprintf(fp,"  oper->_label     = label;\n");
       fprintf(fp,"  oper->_block_num = block_num;\n");
       fprintf(fp,"}\n");
+      // Save the label
+      fprintf(fp,"void %sNode::save_label( Label** label, uint* block_num ) {\n", instr->_ident);
+      fprintf(fp,"  labelOper* oper  = (labelOper*)(opnd_array(%d));\n",
+              label_position );
+      fprintf(fp,"  *label = oper->_label;\n");
+      fprintf(fp,"  *block_num = oper->_block_num;\n");
+      fprintf(fp,"}\n");
     }
   }
 
--- a/src/share/vm/adlc/output_h.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/adlc/output_h.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1519,8 +1519,9 @@
     // Declare Node::methods that set operand Label's contents
     int label_position = instr->label_position();
     if( label_position != -1 ) {
-      // Set the label, stored in labelOper::_branch_label
-      fprintf(fp,"  virtual void           label_set( Label& label, uint block_num );\n");
+      // Set/Save the label, stored in labelOper::_branch_label
+      fprintf(fp,"  virtual void           label_set( Label* label, uint block_num );\n");
+      fprintf(fp,"  virtual void           save_label( Label** label, uint* block_num );\n");
     }
 
     // If this instruction contains a methodOper
@@ -1536,16 +1537,16 @@
     // Each instruction attribute results in a virtual call of same name.
     // The ins_cost is not handled here.
     Attribute *attr = instr->_attribs;
-    bool is_pc_relative = false;
+    bool avoid_back_to_back = false;
     while (attr != NULL) {
       if (strcmp(attr->_ident,"ins_cost") &&
-          strcmp(attr->_ident,"ins_pc_relative")) {
+          strcmp(attr->_ident,"ins_short_branch")) {
         fprintf(fp,"  int             %s() const { return %s; }\n",
                 attr->_ident, attr->_val);
       }
-      // Check value for ins_pc_relative, and if it is true (1), set the flag
-      if (!strcmp(attr->_ident,"ins_pc_relative") && attr->int_val(*this) != 0)
-        is_pc_relative = true;
+      // Check value for ins_avoid_back_to_back, and if it is true (1), set the flag
+      if (!strcmp(attr->_ident,"ins_avoid_back_to_back") && attr->int_val(*this) != 0)
+        avoid_back_to_back = true;
       attr = (Attribute *)attr->_next;
     }
 
@@ -1657,20 +1658,10 @@
     fprintf(fp," _num_opnds = %d; _opnds = _opnd_array; ", instr->num_opnds());
 
     bool node_flags_set = false;
-    // flag: if this instruction matches an ideal 'Goto' node
-    if ( instr->is_ideal_goto() ) {
-      fprintf(fp,"init_flags(Flag_is_Goto");
-      node_flags_set = true;
-    }
-
     // flag: if this instruction matches an ideal 'Copy*' node
     if ( instr->is_ideal_copy() != 0 ) {
-      if ( node_flags_set ) {
-        fprintf(fp," | Flag_is_Copy");
-      } else {
-        fprintf(fp,"init_flags(Flag_is_Copy");
-        node_flags_set = true;
-      }
+      fprintf(fp,"init_flags(Flag_is_Copy");
+      node_flags_set = true;
     }
 
     // Is an instruction is a constant?  If so, get its type
@@ -1688,16 +1679,6 @@
       }
     }
 
-    // flag: if instruction matches 'If' | 'Goto' | 'CountedLoopEnd | 'Jump'
-    if ( instr->is_ideal_branch() ) {
-      if ( node_flags_set ) {
-        fprintf(fp," | Flag_is_Branch");
-      } else {
-        fprintf(fp,"init_flags(Flag_is_Branch");
-        node_flags_set = true;
-      }
-    }
-
     // flag: if this instruction is cisc alternate
     if ( can_cisc_spill() && instr->is_cisc_alternate() ) {
       if ( node_flags_set ) {
@@ -1708,16 +1689,6 @@
       }
     }
 
-    // flag: if this instruction is pc relative
-    if ( is_pc_relative ) {
-      if ( node_flags_set ) {
-        fprintf(fp," | Flag_is_pc_relative");
-      } else {
-        fprintf(fp,"init_flags(Flag_is_pc_relative");
-        node_flags_set = true;
-      }
-    }
-
     // flag: if this instruction has short branch form
     if ( instr->has_short_branch_form() ) {
       if ( node_flags_set ) {
@@ -1728,6 +1699,16 @@
       }
     }
 
+    // flag: if this instruction should not be generated back to back.
+    if ( avoid_back_to_back ) {
+      if ( node_flags_set ) {
+        fprintf(fp," | Flag_avoid_back_to_back");
+      } else {
+        fprintf(fp,"init_flags(Flag_avoid_back_to_back");
+        node_flags_set = true;
+      }
+    }
+
     // Check if machine instructions that USE memory, but do not DEF memory,
     // depend upon a node that defines memory in machine-independent graph.
     if ( instr->needs_anti_dependence_check(_globalNames) ) {
@@ -1743,10 +1724,6 @@
       fprintf(fp,"); ");
     }
 
-    if (instr->is_ideal_unlock() || instr->is_ideal_call_leaf()) {
-      fprintf(fp,"clear_flag(Flag_is_safepoint_node); ");
-    }
-
     fprintf(fp,"}\n");
 
     // size_of, used by base class's clone to obtain the correct size.
--- a/src/share/vm/c1/c1_Compilation.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/c1/c1_Compilation.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -346,7 +346,6 @@
     implicit_exception_table(),
     compiler(),
     _env->comp_level(),
-    true,
     has_unsafe_access()
   );
 }
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -28,11 +28,14 @@
 #include "c1/c1_Compilation.hpp"
 #include "c1/c1_GraphBuilder.hpp"
 #include "c1/c1_InstructionPrinter.hpp"
+#include "ci/ciCallSite.hpp"
 #include "ci/ciField.hpp"
 #include "ci/ciKlass.hpp"
+#include "ci/ciMethodHandle.hpp"
 #include "compiler/compileBroker.hpp"
 #include "interpreter/bytecode.hpp"
 #include "runtime/sharedRuntime.hpp"
+#include "runtime/compilationPolicy.hpp"
 #include "utilities/bitMap.inline.hpp"
 
 class BlockListBuilder VALUE_OBJ_CLASS_SPEC {
@@ -1423,7 +1426,7 @@
     // See whether this is the first return; if so, store off some
     // of the state for later examination
     if (num_returns() == 0) {
-      set_inline_cleanup_info(_block, _last, state());
+      set_inline_cleanup_info();
     }
 
     // The current bci() is in the wrong scope, so use the bci() of
@@ -1581,6 +1584,8 @@
     code = Bytecodes::_invokespecial;
   }
 
+  bool is_invokedynamic = code == Bytecodes::_invokedynamic;
+
   // NEEDS_CLEANUP
   // I've added the target-is_loaded() test below but I don't really understand
   // how klass->is_loaded() can be true and yet target->is_loaded() is false.
@@ -1692,26 +1697,31 @@
       && target->will_link(klass, callee_holder, code)) {
     // callee is known => check if we have static binding
     assert(target->is_loaded(), "callee must be known");
-    if (code == Bytecodes::_invokestatic
-     || code == Bytecodes::_invokespecial
-     || code == Bytecodes::_invokevirtual && target->is_final_method()
-    ) {
-      // static binding => check if callee is ok
-      ciMethod* inline_target = (cha_monomorphic_target != NULL)
-                                  ? cha_monomorphic_target
-                                  : target;
-      bool res = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL));
+    if (code == Bytecodes::_invokestatic  ||
+        code == Bytecodes::_invokespecial ||
+        code == Bytecodes::_invokevirtual && target->is_final_method() ||
+        code == Bytecodes::_invokedynamic) {
+      ciMethod* inline_target = (cha_monomorphic_target != NULL) ? cha_monomorphic_target : target;
+      bool success = false;
+      if (target->is_method_handle_invoke()) {
+        // method handle invokes
+        success = !is_invokedynamic ? for_method_handle_inline(target) : for_invokedynamic_inline(target);
+      }
+      if (!success) {
+        // static binding => check if callee is ok
+        success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL));
+      }
       CHECK_BAILOUT();
 
 #ifndef PRODUCT
       // printing
-      if (PrintInlining && !res) {
+      if (PrintInlining && !success) {
         // if it was successfully inlined, then it was already printed.
-        print_inline_result(inline_target, res);
+        print_inline_result(inline_target, success);
       }
 #endif
       clear_inline_bailout();
-      if (res) {
+      if (success) {
         // Register dependence if JVMTI has either breakpoint
         // setting or hotswapping of methods capabilities since they may
         // cause deoptimization.
@@ -1739,7 +1749,6 @@
     code == Bytecodes::_invokespecial   ||
     code == Bytecodes::_invokevirtual   ||
     code == Bytecodes::_invokeinterface;
-  bool is_invokedynamic = code == Bytecodes::_invokedynamic;
   ValueType* result_type = as_ValueType(target->return_type());
 
   // We require the debug info to be the "state before" because
@@ -3032,9 +3041,12 @@
   if (callee->should_exclude()) {
     // callee is excluded
     INLINE_BAILOUT("excluded by CompilerOracle")
+  } else if (callee->should_not_inline()) {
+    // callee should not be inlined
+    INLINE_BAILOUT("disallowed by CompilerOracle")
   } else if (!callee->can_be_compiled()) {
     // callee is not compilable (prob. has breakpoints)
-    INLINE_BAILOUT("not compilable")
+    INLINE_BAILOUT("not compilable (disabled)")
   } else if (callee->intrinsic_id() != vmIntrinsics::_none && try_inline_intrinsics(callee)) {
     // intrinsics can be native or not
     return true;
@@ -3393,10 +3405,10 @@
 }
 
 
-bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
+bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, BlockBegin* cont_block) {
   assert(!callee->is_native(), "callee must not be native");
-  if (count_backedges() && callee->has_loops()) {
-    INLINE_BAILOUT("too complex for tiered");
+  if (CompilationPolicy::policy()->should_not_inline(compilation()->env(), callee)) {
+    INLINE_BAILOUT("inlining prohibited by policy");
   }
   // first perform tests of things it's not possible to inline
   if (callee->has_exception_handlers() &&
@@ -3409,24 +3421,6 @@
   // Proper inlining of methods with jsrs requires a little more work.
   if (callee->has_jsrs()                 ) INLINE_BAILOUT("jsrs not handled properly by inliner yet");
 
-  // now perform tests that are based on flag settings
-  if (inline_level() > MaxInlineLevel                         ) INLINE_BAILOUT("too-deep inlining");
-  if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("too-deep recursive inlining");
-  if (callee->code_size() > max_inline_size()                 ) INLINE_BAILOUT("callee is too large");
-
-  // don't inline throwable methods unless the inlining tree is rooted in a throwable class
-  if (callee->name() == ciSymbol::object_initializer_name() &&
-      callee->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) {
-    // Throwable constructor call
-    IRScope* top = scope();
-    while (top->caller() != NULL) {
-      top = top->caller();
-    }
-    if (!top->method()->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) {
-      INLINE_BAILOUT("don't inline Throwable constructors");
-    }
-  }
-
   // When SSE2 is used on intel, then no special handling is needed
   // for strictfp because the enum-constant is fixed at compile time,
   // the check for UseSSE2 is needed here
@@ -3434,13 +3428,36 @@
     INLINE_BAILOUT("caller and callee have different strict fp requirements");
   }
 
-  if (compilation()->env()->num_inlined_bytecodes() > DesiredMethodLimit) {
-    INLINE_BAILOUT("total inlining greater than DesiredMethodLimit");
-  }
-
   if (is_profiling() && !callee->ensure_method_data()) {
     INLINE_BAILOUT("mdo allocation failed");
   }
+
+  // now perform tests that are based on flag settings
+  if (callee->should_inline()) {
+    // ignore heuristic controls on inlining
+  } else {
+    if (inline_level() > MaxInlineLevel                         ) INLINE_BAILOUT("too-deep inlining");
+    if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("too-deep recursive inlining");
+    if (callee->code_size_for_inlining() > max_inline_size()    ) INLINE_BAILOUT("callee is too large");
+
+    // don't inline throwable methods unless the inlining tree is rooted in a throwable class
+    if (callee->name() == ciSymbol::object_initializer_name() &&
+        callee->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) {
+      // Throwable constructor call
+      IRScope* top = scope();
+      while (top->caller() != NULL) {
+        top = top->caller();
+      }
+      if (!top->method()->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) {
+        INLINE_BAILOUT("don't inline Throwable constructors");
+      }
+    }
+
+    if (compilation()->env()->num_inlined_bytecodes() > DesiredMethodLimit) {
+      INLINE_BAILOUT("total inlining greater than DesiredMethodLimit");
+    }
+  }
+
 #ifndef PRODUCT
       // printing
   if (PrintInlining) {
@@ -3459,7 +3476,8 @@
 
   // Insert null check if necessary
   Value recv = NULL;
-  if (code() != Bytecodes::_invokestatic) {
+  if (code() != Bytecodes::_invokestatic &&
+      code() != Bytecodes::_invokedynamic) {
     // note: null check must happen even if first instruction of callee does
     //       an implicit null check since the callee is in a different scope
     //       and we must make sure exception handling does the right thing
@@ -3487,7 +3505,7 @@
   // fall-through of control flow, all return instructions of the
   // callee will need to be replaced by Goto's pointing to this
   // continuation point.
-  BlockBegin* cont = block_at(next_bci());
+  BlockBegin* cont = cont_block != NULL ? cont_block : block_at(next_bci());
   bool continuation_existed = true;
   if (cont == NULL) {
     cont = new BlockBegin(next_bci());
@@ -3599,27 +3617,29 @@
   // block merging. This allows load elimination and CSE to take place
   // across multiple callee scopes if they are relatively simple, and
   // is currently essential to making inlining profitable.
-  if (   num_returns() == 1
-      && block() == orig_block
-      && block() == inline_cleanup_block()) {
-    _last = inline_cleanup_return_prev();
-    _state = inline_cleanup_state();
-  } else if (continuation_preds == cont->number_of_preds()) {
-    // Inlining caused that the instructions after the invoke in the
-    // caller are not reachable any more. So skip filling this block
-    // with instructions!
-    assert (cont == continuation(), "");
-    assert(_last && _last->as_BlockEnd(), "");
-    _skip_block = true;
-  } else {
-    // Resume parsing in continuation block unless it was already parsed.
-    // Note that if we don't change _last here, iteration in
-    // iterate_bytecodes_for_block will stop when we return.
-    if (!continuation()->is_set(BlockBegin::was_visited_flag)) {
-      // add continuation to work list instead of parsing it immediately
+  if (cont_block == NULL) {
+    if (num_returns() == 1
+        && block() == orig_block
+        && block() == inline_cleanup_block()) {
+      _last  = inline_cleanup_return_prev();
+      _state = inline_cleanup_state();
+    } else if (continuation_preds == cont->number_of_preds()) {
+      // Inlining caused that the instructions after the invoke in the
+      // caller are not reachable any more. So skip filling this block
+      // with instructions!
+      assert(cont == continuation(), "");
       assert(_last && _last->as_BlockEnd(), "");
-      scope_data()->parent()->add_to_work_list(continuation());
       _skip_block = true;
+    } else {
+      // Resume parsing in continuation block unless it was already parsed.
+      // Note that if we don't change _last here, iteration in
+      // iterate_bytecodes_for_block will stop when we return.
+      if (!continuation()->is_set(BlockBegin::was_visited_flag)) {
+        // add continuation to work list instead of parsing it immediately
+        assert(_last && _last->as_BlockEnd(), "");
+        scope_data()->parent()->add_to_work_list(continuation());
+        _skip_block = true;
+      }
     }
   }
 
@@ -3636,6 +3656,114 @@
 }
 
 
+bool GraphBuilder::for_method_handle_inline(ciMethod* callee) {
+  assert(!callee->is_static(), "change next line");
+  int index = state()->stack_size() - (callee->arg_size_no_receiver() + 1);
+  Value receiver = state()->stack_at(index);
+
+  if (receiver->type()->is_constant()) {
+    ciMethodHandle* method_handle = receiver->type()->as_ObjectType()->constant_value()->as_method_handle();
+
+    // Set the callee to have access to the class and signature in
+    // the MethodHandleCompiler.
+    method_handle->set_callee(callee);
+    method_handle->set_caller(method());
+
+    // Get an adapter for the MethodHandle.
+    ciMethod* method_handle_adapter = method_handle->get_method_handle_adapter();
+    if (method_handle_adapter != NULL) {
+      return try_inline(method_handle_adapter, /*holder_known=*/ true);
+    }
+  } else if (receiver->as_CheckCast()) {
+    // Match MethodHandle.selectAlternative idiom
+    Phi* phi = receiver->as_CheckCast()->obj()->as_Phi();
+
+    if (phi != NULL && phi->operand_count() == 2) {
+      // Get the two MethodHandle inputs from the Phi.
+      Value op1 = phi->operand_at(0);
+      Value op2 = phi->operand_at(1);
+      ciMethodHandle* mh1 = op1->type()->as_ObjectType()->constant_value()->as_method_handle();
+      ciMethodHandle* mh2 = op2->type()->as_ObjectType()->constant_value()->as_method_handle();
+
+      // Set the callee to have access to the class and signature in
+      // the MethodHandleCompiler.
+      mh1->set_callee(callee);
+      mh1->set_caller(method());
+      mh2->set_callee(callee);
+      mh2->set_caller(method());
+
+      // Get adapters for the MethodHandles.
+      ciMethod* mh1_adapter = mh1->get_method_handle_adapter();
+      ciMethod* mh2_adapter = mh2->get_method_handle_adapter();
+
+      if (mh1_adapter != NULL && mh2_adapter != NULL) {
+        set_inline_cleanup_info();
+
+        // Build the If guard
+        BlockBegin* one = new BlockBegin(next_bci());
+        BlockBegin* two = new BlockBegin(next_bci());
+        BlockBegin* end = new BlockBegin(next_bci());
+        Instruction* iff = append(new If(phi, If::eql, false, op1, one, two, NULL, false));
+        block()->set_end(iff->as_BlockEnd());
+
+        // Connect up the states
+        one->merge(block()->end()->state());
+        two->merge(block()->end()->state());
+
+        // Save the state for the second inlinee
+        ValueStack* state_before = copy_state_before();
+
+        // Parse first adapter
+        _last = _block = one;
+        if (!try_inline_full(mh1_adapter, /*holder_known=*/ true, end)) {
+          restore_inline_cleanup_info();
+          block()->clear_end();  // remove appended iff
+          return false;
+        }
+
+        // Parse second adapter
+        _last = _block = two;
+        _state = state_before;
+        if (!try_inline_full(mh2_adapter, /*holder_known=*/ true, end)) {
+          restore_inline_cleanup_info();
+          block()->clear_end();  // remove appended iff
+          return false;
+        }
+
+        connect_to_end(end);
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+
+bool GraphBuilder::for_invokedynamic_inline(ciMethod* callee) {
+  // Get the MethodHandle from the CallSite.
+  ciCallSite*     call_site     = stream()->get_call_site();
+  ciMethodHandle* method_handle = call_site->get_target();
+
+  // Set the callee to have access to the class and signature in the
+  // MethodHandleCompiler.
+  method_handle->set_callee(callee);
+  method_handle->set_caller(method());
+
+  // Get an adapter for the MethodHandle.
+  ciMethod* method_handle_adapter = method_handle->get_invokedynamic_adapter();
+  if (method_handle_adapter != NULL) {
+    if (try_inline(method_handle_adapter, /*holder_known=*/ true)) {
+      // Add a dependence for invalidation of the optimization.
+      if (!call_site->is_constant_call_site()) {
+        dependency_recorder()->assert_call_site_target_value(call_site, method_handle);
+      }
+      return true;
+    }
+  }
+  return false;
+}
+
+
 void GraphBuilder::inline_bailout(const char* msg) {
   assert(msg != NULL, "inline bailout msg must exist");
   _inline_bailout_msg = msg;
--- a/src/share/vm/c1/c1_GraphBuilder.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/c1/c1_GraphBuilder.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -315,9 +315,17 @@
                                ValueStack* return_state) { scope_data()->set_inline_cleanup_info(block,
                                                                                                   return_prev,
                                                                                                   return_state); }
+  void set_inline_cleanup_info() {
+    set_inline_cleanup_info(_block, _last, _state);
+  }
   BlockBegin*  inline_cleanup_block() const              { return scope_data()->inline_cleanup_block();  }
   Instruction* inline_cleanup_return_prev() const        { return scope_data()->inline_cleanup_return_prev(); }
   ValueStack*  inline_cleanup_state() const              { return scope_data()->inline_cleanup_state();  }
+  void restore_inline_cleanup_info() {
+    _block = inline_cleanup_block();
+    _last  = inline_cleanup_return_prev();
+    _state = inline_cleanup_state();
+  }
   void incr_num_returns()                                { scope_data()->incr_num_returns();             }
   int  num_returns() const                               { return scope_data()->num_returns();           }
   intx max_inline_size() const                           { return scope_data()->max_inline_size();       }
@@ -329,11 +337,15 @@
   void fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler = false);
 
   // inliners
-  bool try_inline(ciMethod* callee, bool holder_known);
+  bool try_inline(           ciMethod* callee, bool holder_known);
   bool try_inline_intrinsics(ciMethod* callee);
-  bool try_inline_full      (ciMethod* callee, bool holder_known);
+  bool try_inline_full(      ciMethod* callee, bool holder_known, BlockBegin* cont_block = NULL);
   bool try_inline_jsr(int jsr_dest_bci);
 
+  // JSR 292 support
+  bool for_method_handle_inline(ciMethod* callee);
+  bool for_invokedynamic_inline(ciMethod* callee);
+
   // helpers
   void inline_bailout(const char* msg);
   BlockBegin* header_block(BlockBegin* entry, BlockBegin::Flag f, ValueStack* state);
--- a/src/share/vm/c1/c1_Instruction.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/c1/c1_Instruction.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -514,28 +514,17 @@
 
 void BlockBegin::set_end(BlockEnd* end) {
   assert(end != NULL, "should not reset block end to NULL");
-  BlockEnd* old_end = _end;
-  if (end == old_end) {
+  if (end == _end) {
     return;
   }
-  // Must make the predecessors/successors match up with the
-  // BlockEnd's notion.
-  int i, n;
-  if (old_end != NULL) {
-    // disconnect from the old end
-    old_end->set_begin(NULL);
+  clear_end();
 
-    // disconnect this block from it's current successors
-    for (i = 0; i < _successors.length(); i++) {
-      _successors.at(i)->remove_predecessor(this);
-    }
-  }
+  // Set the new end
   _end = end;
 
   _successors.clear();
   // Now reset successors list based on BlockEnd
-  n = end->number_of_sux();
-  for (i = 0; i < n; i++) {
+  for (int i = 0; i < end->number_of_sux(); i++) {
     BlockBegin* sux = end->sux_at(i);
     _successors.append(sux);
     sux->_predecessors.append(this);
@@ -544,6 +533,22 @@
 }
 
 
+void BlockBegin::clear_end() {
+  // Must make the predecessors/successors match up with the
+  // BlockEnd's notion.
+  if (_end != NULL) {
+    // disconnect from the old end
+    _end->set_begin(NULL);
+
+    // disconnect this block from its current successors
+    for (int i = 0; i < _successors.length(); i++) {
+      _successors.at(i)->remove_predecessor(this);
+    }
+    _end = NULL;
+  }
+}
+
+
 void BlockBegin::disconnect_edge(BlockBegin* from, BlockBegin* to) {
   // disconnect any edges between from and to
 #ifndef PRODUCT
--- a/src/share/vm/c1/c1_Instruction.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/c1/c1_Instruction.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1601,6 +1601,7 @@
   void set_depth_first_number(int dfn)           { _depth_first_number = dfn; }
   void set_linear_scan_number(int lsn)           { _linear_scan_number = lsn; }
   void set_end(BlockEnd* end);
+  void clear_end();
   void disconnect_from_graph();
   static void disconnect_edge(BlockBegin* from, BlockBegin* to);
   BlockBegin* insert_block_between(BlockBegin* sux);
--- a/src/share/vm/c1/c1_LIR.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/c1/c1_LIR.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -142,7 +142,8 @@
 #endif
 #ifdef ARM
   assert(disp() == 0 || index()->is_illegal(), "can't have both");
-  assert(-4096 < disp() && disp() < 4096, "architecture constraint");
+  // Note: offsets higher than 4096 must not be rejected here. They can
+  // be handled by the back-end or will be rejected if not.
 #endif
 #ifdef _LP64
   assert(base()->is_cpu_register(), "wrong base operand");
--- a/src/share/vm/c1/c1_LIRAssembler.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/c1/c1_LIRAssembler.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -121,7 +121,7 @@
 
 void LIR_Assembler::check_codespace() {
   CodeSection* cs = _masm->code_section();
-  if (cs->remaining() < (int)(1*K)) {
+  if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
     BAILOUT("CodeBuffer overflow");
   }
 }
--- a/src/share/vm/c1/c1_LIRAssembler.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/c1/c1_LIRAssembler.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -133,7 +133,6 @@
   static bool is_small_constant(LIR_Opr opr);
 
   static LIR_Opr receiverOpr();
-  static LIR_Opr incomingReceiverOpr();
   static LIR_Opr osrBufferPointer();
 
   // stubs
--- a/src/share/vm/c1/c1_LinearScan.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/c1/c1_LinearScan.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -2404,7 +2404,7 @@
       assert(!is_call_site || assigned_reg >= nof_regs || !is_caller_save(assigned_reg), "interval is in a caller-save register at a call -> register will be overwritten");
 
       VMReg name = vm_reg_for_interval(interval);
-      map->set_oop(name);
+      set_oop(map, name);
 
       // Spill optimization: when the stack value is guaranteed to be always correct,
       // then it must be added to the oop map even if the interval is currently in a register
@@ -2415,7 +2415,7 @@
         assert(interval->canonical_spill_slot() >= LinearScan::nof_regs, "no spill slot assigned");
         assert(interval->assigned_reg() < LinearScan::nof_regs, "interval is on stack, so stack slot is registered twice");
 
-        map->set_oop(frame_map()->slot_regname(interval->canonical_spill_slot() - LinearScan::nof_regs));
+        set_oop(map, frame_map()->slot_regname(interval->canonical_spill_slot() - LinearScan::nof_regs));
       }
     }
   }
@@ -2424,7 +2424,7 @@
   assert(info->stack() != NULL, "CodeEmitInfo must always have a stack");
   int locks_count = info->stack()->total_locks_size();
   for (int i = 0; i < locks_count; i++) {
-    map->set_oop(frame_map()->monitor_object_regname(i));
+    set_oop(map, frame_map()->monitor_object_regname(i));
   }
 
   return map;
@@ -2619,6 +2619,24 @@
 
     Location::Type loc_type = float_saved_as_double ? Location::float_in_dbl : Location::normal;
     VMReg rname = frame_map()->fpu_regname(opr->fpu_regnr());
+#ifndef __SOFTFP__
+#ifndef VM_LITTLE_ENDIAN
+    if (! float_saved_as_double) {
+      // On a big-endian system, we may have an issue if float registers use only
+      // the low half of the (same) double registers.
+      // Both the float and the double could have the same regnr but would correspond
+      // to two different addresses once saved.
+
+      // get next safely (no assertion checks)
+      VMReg next = VMRegImpl::as_VMReg(1+rname->value());
+      if (next->is_reg() &&
+          (next->as_FloatRegister() == rname->as_FloatRegister())) {
+        // the back-end does use the same numbering for the double and the float
+        rname = next; // VMReg for the low bits, e.g. the real VMReg for the float
+      }
+    }
+#endif
+#endif
     LocationValue* sv = new LocationValue(Location::new_reg_loc(loc_type, rname));
 
     scope_values->append(sv);
--- a/src/share/vm/c1/c1_LinearScan.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/c1/c1_LinearScan.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -352,6 +352,13 @@
 
   MonitorValue*  location_for_monitor_index(int monitor_index);
   LocationValue* location_for_name(int name, Location::Type loc_type);
+  void set_oop(OopMap* map, VMReg name) {
+    if (map->legal_vm_reg_name(name)) {
+      map->set_oop(name);
+    } else {
+      bailout("illegal oopMap register name");
+    }
+  }
 
   int append_scope_value_for_constant(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values);
   int append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values);
--- a/src/share/vm/c1/c1_Optimizer.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/c1/c1_Optimizer.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -642,7 +642,7 @@
 void NullCheckVisitor::do_NewTypeArray   (NewTypeArray*    x) { nce()->handle_NewArray(x); }
 void NullCheckVisitor::do_NewObjectArray (NewObjectArray*  x) { nce()->handle_NewArray(x); }
 void NullCheckVisitor::do_NewMultiArray  (NewMultiArray*   x) { nce()->handle_NewArray(x); }
-void NullCheckVisitor::do_CheckCast      (CheckCast*       x) {}
+void NullCheckVisitor::do_CheckCast      (CheckCast*       x) { nce()->clear_last_explicit_null_check(); }
 void NullCheckVisitor::do_InstanceOf     (InstanceOf*      x) {}
 void NullCheckVisitor::do_MonitorEnter   (MonitorEnter*    x) { nce()->handle_AccessMonitor(x); }
 void NullCheckVisitor::do_MonitorExit    (MonitorExit*     x) { nce()->handle_AccessMonitor(x); }
--- a/src/share/vm/c1/c1_Runtime1.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/c1/c1_Runtime1.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -375,16 +375,10 @@
 JRT_END
 
 
-JRT_ENTRY(void, Runtime1::post_jvmti_exception_throw(JavaThread* thread))
-  if (JvmtiExport::can_post_on_exceptions()) {
-    vframeStream vfst(thread, true);
-    address bcp = vfst.method()->bcp_from(vfst.bci());
-    JvmtiExport::post_exception_throw(thread, vfst.method(), bcp, thread->exception_oop());
-  }
-JRT_END
-
-// This is a helper to allow us to safepoint but allow the outer entry
-// to be safepoint free if we need to do an osr
+// counter_overflow() is called from within C1-compiled methods. The enclosing method is the method
+// associated with the top activation record. The method oop of the inlinee (which may be inlined
+// into the enclosing method) is passed as an argument; to make that possible it is embedded in the
+// compiled code as a constant.
 static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, methodOopDesc* m) {
   nmethod* osr_nm = NULL;
   methodHandle method(THREAD, m);
@@ -420,7 +414,7 @@
     bci = branch_bci + offset;
   }
 
-  osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, THREAD);
+  osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, nm, THREAD);
   return osr_nm;
 }
 
--- a/src/share/vm/c1/c1_Runtime1.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/c1/c1_Runtime1.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -65,7 +65,6 @@
   stub(monitorexit_nofpu)              /* optimized version that does not preserve fpu registers */ \
   stub(access_field_patching)        \
   stub(load_klass_patching)          \
-  stub(jvmti_exception_throw)        \
   stub(g1_pre_barrier_slow)          \
   stub(g1_post_barrier_slow)         \
   stub(fpu2long_stub)                \
@@ -141,7 +140,6 @@
   static void unimplemented_entry   (JavaThread* thread, StubID id);
 
   static address exception_handler_for_pc(JavaThread* thread);
-  static void post_jvmti_exception_throw(JavaThread* thread);
 
   static void throw_range_check_exception(JavaThread* thread, int index);
   static void throw_index_exception(JavaThread* thread, int index);
--- a/src/share/vm/c1/c1_globals.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/c1/c1_globals.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -278,7 +278,7 @@
   product(intx, CompilationRepeat, 0,                                       \
           "Number of times to recompile method before returning result")    \
                                                                             \
-  develop(intx, NMethodSizeLimit, (32*K)*wordSize,                          \
+  develop(intx, NMethodSizeLimit, (64*K)*wordSize,                          \
           "Maximum size of a compiled method.")                             \
                                                                             \
   develop(bool, TraceFPUStack, false,                                       \
--- a/src/share/vm/ci/ciArrayKlass.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/ci/ciArrayKlass.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
 // This class, and its subclasses represent klassOops in the
 // HotSpot virtual machine whose Klass part is an arrayKlass.
 class ciArrayKlass : public ciKlass {
+  CI_PACKAGE_ACCESS
 private:
   jint _dimension;
 
--- a/src/share/vm/ci/ciCallProfile.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/ci/ciCallProfile.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -79,6 +79,17 @@
     assert(i < _limit, "out of Call Profile MorphismLimit");
     return _receiver[i];
   }
+
+  // Rescale the current profile based on the incoming scale
+  ciCallProfile rescale(double scale) {
+    assert(scale >= 0 && scale <= 1.0, "out of range");
+    ciCallProfile call = *this;
+    call._count = (int)(call._count * scale);
+    for (int i = 0; i < _morphism; i++) {
+      call._receiver_count[i] = (int)(call._receiver_count[i] * scale);
+    }
+    return call;
+  }
 };
 
 #endif // SHARE_VM_CI_CICALLPROFILE_HPP
--- a/src/share/vm/ci/ciCallSite.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/ci/ciCallSite.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -28,6 +28,16 @@
 
 // ciCallSite
 
+bool ciCallSite::is_constant_call_site() {
+  return klass()->is_subclass_of(CURRENT_ENV->ConstantCallSite_klass());
+}
+bool ciCallSite::is_mutable_call_site() {
+  return klass()->is_subclass_of(CURRENT_ENV->MutableCallSite_klass());
+}
+bool ciCallSite::is_volatile_call_site() {
+  return klass()->is_subclass_of(CURRENT_ENV->VolatileCallSite_klass());
+}
+
 // ------------------------------------------------------------------
 // ciCallSite::get_target
 //
--- a/src/share/vm/ci/ciCallSite.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/ci/ciCallSite.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -37,6 +37,10 @@
   // What kind of ciObject is this?
   bool is_call_site() const { return true; }
 
+  bool is_constant_call_site();
+  bool is_mutable_call_site();
+  bool is_volatile_call_site();
+
   // Return the target MethodHandle of this CallSite.
   ciMethodHandle* get_target() const;
 
--- a/src/share/vm/ci/ciClassList.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/ci/ciClassList.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -77,12 +77,14 @@
 
 // Everyone gives access to ciObjectFactory
 #define CI_PACKAGE_ACCESS \
-friend class ciObjectFactory;
+friend class ciObjectFactory; \
+friend class VMStructs;
 
 // These are the packages that have access to ciEnv
 // Any more access must be given explicitly.
 #define CI_PACKAGE_ACCESS_TO           \
 friend class ciObjectFactory;          \
+friend class VMStructs;                \
 friend class ciCallSite;               \
 friend class ciConstantPoolCache;      \
 friend class ciField;                  \
--- a/src/share/vm/ci/ciConstant.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/ci/ciConstant.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
 //
 // This class represents a constant value.
 class ciConstant VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
 private:
   friend class ciEnv;
   friend class ciField;
@@ -46,9 +47,6 @@
     ciObject* _object;
   } _value;
 
-  // Implementation of the print method.
-  void print_impl(outputStream* st);
-
 public:
 
   ciConstant() {
--- a/src/share/vm/ci/ciEnv.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/ci/ciEnv.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -884,42 +884,63 @@
 }
 
 // ------------------------------------------------------------------
-// ciEnv::check_for_system_dictionary_modification
-// Check for changes to the system dictionary during compilation
-// class loads, evolution, breakpoints
-void ciEnv::check_for_system_dictionary_modification(ciMethod* target) {
+// ciEnv::validate_compile_task_dependencies
+//
+// Check for changes during compilation (e.g. class loads, evolution,
+// breakpoints, call site invalidation).
+void ciEnv::validate_compile_task_dependencies(ciMethod* target) {
   if (failing())  return;  // no need for further checks
 
-  // Dependencies must be checked when the system dictionary changes.
-  // If logging is enabled all violated dependences will be recorded in
-  // the log.  In debug mode check dependencies even if the system
-  // dictionary hasn't changed to verify that no invalid dependencies
-  // were inserted.  Any violated dependences in this case are dumped to
-  // the tty.
-
-  bool counter_changed = system_dictionary_modification_counter_changed();
-  bool test_deps = counter_changed;
-  DEBUG_ONLY(test_deps = true);
-  if (!test_deps)  return;
-
-  bool print_failures = false;
-  DEBUG_ONLY(print_failures = !counter_changed);
-
-  bool keep_going = (print_failures || xtty != NULL);
-
-  int violated = 0;
-
+  // First, check non-klass dependencies as we might return early and
+  // not check klass dependencies if the system dictionary
+  // modification counter hasn't changed (see below).
   for (Dependencies::DepStream deps(dependencies()); deps.next(); ) {
+    if (deps.is_klass_type())  continue;  // skip klass dependencies
     klassOop witness = deps.check_dependency();
     if (witness != NULL) {
-      ++violated;
-      if (print_failures)  deps.print_dependency(witness, /*verbose=*/ true);
-      // If there's no log and we're not sanity-checking, we're done.
-      if (!keep_going)     break;
+      record_failure("invalid non-klass dependency");
+      return;
     }
   }
 
-  if (violated != 0) {
+  // Klass dependencies must be checked when the system dictionary
+  // changes.  If logging is enabled all violated dependences will be
+  // recorded in the log.  In debug mode check dependencies even if
+  // the system dictionary hasn't changed to verify that no invalid
+  // dependencies were inserted.  Any violated dependences in this
+  // case are dumped to the tty.
+  bool counter_changed = system_dictionary_modification_counter_changed();
+
+  bool verify_deps = trueInDebug;
+  if (!counter_changed && !verify_deps)  return;
+
+  int klass_violations = 0;
+  for (Dependencies::DepStream deps(dependencies()); deps.next(); ) {
+    if (!deps.is_klass_type())  continue;  // skip non-klass dependencies
+    klassOop witness = deps.check_dependency();
+    if (witness != NULL) {
+      klass_violations++;
+      if (!counter_changed) {
+        // Dependence failed but counter didn't change.  Log a message
+        // describing what failed and allow the assert at the end to
+        // trigger.
+        deps.print_dependency(witness);
+      } else if (xtty == NULL) {
+        // If we're not logging then a single violation is sufficient,
+        // otherwise we want to log all the dependences which were
+        // violated.
+        break;
+      }
+    }
+  }
+
+  if (klass_violations != 0) {
+#ifdef ASSERT
+    if (!counter_changed && !PrintCompilation) {
+      // Print out the compile task that failed
+      _task->print_line();
+    }
+#endif
     assert(counter_changed, "failed dependencies, but counter didn't change");
     record_failure("concurrent class loading");
   }
@@ -938,7 +959,6 @@
                             ImplicitExceptionTable* inc_table,
                             AbstractCompiler* compiler,
                             int comp_level,
-                            bool has_debug_info,
                             bool has_unsafe_access) {
   VM_ENTRY_MARK;
   nmethod* nm = NULL;
@@ -978,8 +998,8 @@
       // Encode the dependencies now, so we can check them right away.
       dependencies()->encode_content_bytes();
 
-      // Check for {class loads, evolution, breakpoints} during compilation
-      check_for_system_dictionary_modification(target);
+      // Check for {class loads, evolution, breakpoints, ...} during compilation
+      validate_compile_task_dependencies(target);
     }
 
     methodHandle method(THREAD, target->get_methodOop());
@@ -1033,7 +1053,6 @@
         CompileBroker::handle_full_code_cache();
       }
     } else {
-      NOT_PRODUCT(nm->set_has_debug_info(has_debug_info); )
       nm->set_has_unsafe_access(has_unsafe_access);
 
       // Record successful registration.
--- a/src/share/vm/ci/ciEnv.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/ci/ciEnv.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -247,9 +247,9 @@
   // Is this thread currently in the VM state?
   static bool is_in_vm();
 
-  // Helper routine for determining the validity of a compilation
-  // with respect to concurrent class loading.
-  void check_for_system_dictionary_modification(ciMethod* target);
+  // Helper routine for determining the validity of a compilation with
+  // respect to method dependencies (e.g. concurrent class loading).
+  void validate_compile_task_dependencies(ciMethod* target);
 
 public:
   enum {
@@ -317,8 +317,7 @@
                        ImplicitExceptionTable*   inc_table,
                        AbstractCompiler*         compiler,
                        int                       comp_level,
-                       bool                      has_debug_info = true,
-                       bool                      has_unsafe_access = false);
+                       bool                      has_unsafe_access);
 
 
   // Access to certain well known ciObjects.
--- a/src/share/vm/ci/ciField.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/ci/ciField.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -64,9 +64,6 @@
   // shared constructor code
   void initialize_from(fieldDescriptor* fd);
 
-  // The implementation of the print method.
-  void print_impl(outputStream* st);
-
 public:
   ciFlags flags() { return _flags; }
 
@@ -178,6 +175,13 @@
   bool is_volatile    () { return flags().is_volatile(); }
   bool is_transient   () { return flags().is_transient(); }
 
+  bool is_call_site_target() {
+    ciInstanceKlass* callsite_klass = CURRENT_ENV->CallSite_klass();
+    if (callsite_klass == NULL)
+      return false;
+    return (holder()->is_subclass_of(callsite_klass) && (name() == ciSymbol::target_name()));
+  }
+
   // Debugging output
   void print();
   void print_name_on(outputStream* st);
--- a/src/share/vm/ci/ciInstanceKlass.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/ci/ciInstanceKlass.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -31,6 +31,7 @@
 #include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
 #include "oops/oop.inline.hpp"
+#include "oops/fieldStreams.hpp"
 #include "runtime/fieldDescriptor.hpp"
 
 // ciInstanceKlass
@@ -412,7 +413,7 @@
     VM_ENTRY_MARK;
     ciEnv* curEnv = ciEnv::current();
     instanceKlass* ik = get_instanceKlass();
-    int max_n_fields = ik->fields()->length()/instanceKlass::next_offset;
+    int max_n_fields = ik->java_fields_count();
 
     Arena* arena = curEnv->arena();
     _non_static_fields =
@@ -476,23 +477,6 @@
   // Now sort them by offset, ascending.
   // (In principle, they could mix with superclass fields.)
   fields->sort(sort_field_by_offset);
-#ifdef ASSERT
-  int last_offset = instanceOopDesc::base_offset_in_bytes();
-  for (int i = 0; i < fields->length(); i++) {
-    ciField* field = fields->at(i);
-    int offset = field->offset_in_bytes();
-    int size   = (field->_type == NULL) ? heapOopSize : field->size_in_bytes();
-    assert(last_offset <= offset, err_msg("no field overlap: %d <= %d", last_offset, offset));
-    if (last_offset > (int)sizeof(oopDesc))
-      assert((offset - last_offset) < BytesPerLong, "no big holes");
-    // Note:  Two consecutive T_BYTE fields will be separated by wordSize-1
-    // padding bytes if one of them is declared by a superclass.
-    // This is a minor inefficiency classFileParser.cpp.
-    last_offset = offset + size;
-  }
-  assert(last_offset <= (int)instanceOopDesc::base_offset_in_bytes() + fsize, "no overflow");
-#endif
-
   _nonstatic_fields = fields;
   return flen;
 }
@@ -505,33 +489,29 @@
   int flen = 0;
   GrowableArray<ciField*>* fields = NULL;
   instanceKlass* k = get_instanceKlass();
-  typeArrayOop fields_array = k->fields();
-  for (int pass = 0; pass <= 1; pass++) {
-    for (int i = 0, alen = fields_array->length(); i < alen; i += instanceKlass::next_offset) {
-      fieldDescriptor fd;
-      fd.initialize(k->as_klassOop(), i);
-      if (fd.is_static())  continue;
-      if (pass == 0) {
-        flen += 1;
-      } else {
-        ciField* field = new (arena) ciField(&fd);
-        fields->append(field);
-      }
-    }
+  for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
+    if (fs.access_flags().is_static())  continue;
+    flen += 1;
+  }
 
-    // Between passes, allocate the array:
-    if (pass == 0) {
-      if (flen == 0) {
-        return NULL;  // return nothing if none are locally declared
-      }
-      if (super_fields != NULL) {
-        flen += super_fields->length();
-      }
-      fields = new (arena) GrowableArray<ciField*>(arena, flen, 0, NULL);
-      if (super_fields != NULL) {
-        fields->appendAll(super_fields);
-      }
-    }
+  // allocate the array:
+  if (flen == 0) {
+    return NULL;  // return nothing if none are locally declared
+  }
+  if (super_fields != NULL) {
+    flen += super_fields->length();
+  }
+  fields = new (arena) GrowableArray<ciField*>(arena, flen, 0, NULL);
+  if (super_fields != NULL) {
+    fields->appendAll(super_fields);
+  }
+
+  for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
+    if (fs.access_flags().is_static())  continue;
+    fieldDescriptor fd;
+    fd.initialize(k->as_klassOop(), fs.index());
+    ciField* field = new (arena) ciField(&fd);
+    fields->append(field);
   }
   assert(fields->length() == flen, "sanity");
   return fields;
--- a/src/share/vm/ci/ciMethod.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/ci/ciMethod.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1010,6 +1010,40 @@
   return 0;
 }
 
+int ciMethod::highest_osr_comp_level() {
+  check_is_loaded();
+  VM_ENTRY_MARK;
+  return get_methodOop()->highest_osr_comp_level();
+}
+
+// ------------------------------------------------------------------
+// ciMethod::code_size_for_inlining
+//
+// Code size for inlining decisions.
+//
+// Don't fully count method handle adapters against inlining budgets:
+// the metric we use here is the number of call sites in the adapter
+// as they are probably the instructions which generate some code.
+int ciMethod::code_size_for_inlining() {
+  check_is_loaded();
+
+  // Method handle adapters
+  if (is_method_handle_adapter()) {
+    // Count call sites
+    int call_site_count = 0;
+    ciBytecodeStream iter(this);
+    while (iter.next() != ciBytecodeStream::EOBC()) {
+      if (Bytecodes::is_invoke(iter.cur_bc())) {
+        call_site_count++;
+      }
+    }
+    return call_site_count;
+  }
+
+  // Normal method
+  return code_size();
+}
+
 // ------------------------------------------------------------------
 // ciMethod::instructions_size
 //
--- a/src/share/vm/ci/ciMethod.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/ci/ciMethod.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -157,7 +157,11 @@
   int interpreter_invocation_count() const       { check_is_loaded(); return _interpreter_invocation_count; }
   int interpreter_throwout_count() const         { check_is_loaded(); return _interpreter_throwout_count; }
 
+  // Code size for inlining decisions.
+  int code_size_for_inlining();
+
   int comp_level();
+  int highest_osr_comp_level();
 
   Bytecodes::Code java_code_at_bci(int bci) {
     address bcp = code() + bci;
--- a/src/share/vm/ci/ciMethodHandle.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/ci/ciMethodHandle.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -37,7 +37,7 @@
 // ciMethodHandle::get_adapter
 //
 // Return an adapter for this MethodHandle.
-ciMethod* ciMethodHandle::get_adapter_impl(bool is_invokedynamic) const {
+ciMethod* ciMethodHandle::get_adapter_impl(bool is_invokedynamic) {
   VM_ENTRY_MARK;
   Handle h(get_oop());
   methodHandle callee(_callee->get_methodOop());
@@ -73,7 +73,7 @@
 // ciMethodHandle::get_adapter
 //
 // Return an adapter for this MethodHandle.
-ciMethod* ciMethodHandle::get_adapter(bool is_invokedynamic) const {
+ciMethod* ciMethodHandle::get_adapter(bool is_invokedynamic) {
   ciMethod* result = get_adapter_impl(is_invokedynamic);
   if (result) {
     // Fake up the MDO maturity.
@@ -86,11 +86,22 @@
 }
 
 
+#ifndef PRODUCT
 // ------------------------------------------------------------------
-// ciMethodHandle::print_impl
+// ciMethodHandle::print_chain_impl
 //
 // Implementation of the print method.
-void ciMethodHandle::print_impl(outputStream* st) {
-  st->print(" type=");
-  get_oop()->print();
+void ciMethodHandle::print_chain_impl(outputStream* st) {
+  ASSERT_IN_VM;
+  MethodHandleChain::print(get_oop());
 }
+
+
+// ------------------------------------------------------------------
+// ciMethodHandle::print_chain
+//
+// Implementation of the print_chain method.
+void ciMethodHandle::print_chain(outputStream* st) {
+  GUARDED_VM_ENTRY(print_chain_impl(st););
+}
+#endif
--- a/src/share/vm/ci/ciMethodHandle.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/ci/ciMethodHandle.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -37,19 +37,23 @@
   ciMethod*      _callee;
   ciMethod*      _caller;
   ciCallProfile  _profile;
+  ciMethod*      _method_handle_adapter;
+  ciMethod*      _invokedynamic_adapter;
 
   // Return an adapter for this MethodHandle.
-  ciMethod* get_adapter_impl(bool is_invokedynamic) const;
-  ciMethod* get_adapter(     bool is_invokedynamic) const;
+  ciMethod* get_adapter_impl(bool is_invokedynamic);
+  ciMethod* get_adapter(     bool is_invokedynamic);
 
 protected:
-  void print_impl(outputStream* st);
+  void print_chain_impl(outputStream* st) PRODUCT_RETURN;
 
 public:
   ciMethodHandle(instanceHandle h_i) :
     ciInstance(h_i),
     _callee(NULL),
-    _caller(NULL)
+    _caller(NULL),
+    _method_handle_adapter(NULL),
+    _invokedynamic_adapter(NULL)
   {}
 
   // What kind of ciObject is this?
@@ -60,10 +64,22 @@
   void set_call_profile(ciCallProfile profile)  { _profile = profile; }
 
   // Return an adapter for a MethodHandle call.
-  ciMethod* get_method_handle_adapter() const { return get_adapter(false); }
+  ciMethod* get_method_handle_adapter() {
+    if (_method_handle_adapter == NULL) {
+      _method_handle_adapter = get_adapter(false);
+    }
+    return _method_handle_adapter;
+  }
 
   // Return an adapter for an invokedynamic call.
-  ciMethod* get_invokedynamic_adapter() const { return get_adapter(true);  }
+  ciMethod* get_invokedynamic_adapter() {
+    if (_invokedynamic_adapter == NULL) {
+      _invokedynamic_adapter = get_adapter(true);
+    }
+    return _invokedynamic_adapter;
+  }
+
+  void print_chain(outputStream* st = tty) PRODUCT_RETURN;
 };
 
 #endif // SHARE_VM_CI_CIMETHODHANDLE_HPP
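
The const qualifiers are dropped from get_adapter_impl/get_adapter above because the adapters are now memoized in the two new fields, so each adapter is built at most once per ciMethodHandle. A minimal sketch of that lazy-caching pattern in isolation, using generic names rather than the HotSpot types:

#include <iostream>
#include <string>

// Stand-in for the expensive adapter construction done by get_adapter_impl().
static std::string* build_adapter(bool is_invokedynamic) {
  std::cout << "building adapter\n";   // runs at most once per flavour
  return new std::string(is_invokedynamic ? "indy-adapter" : "mh-adapter");
}

class AdapterCache {
  std::string* _method_handle_adapter = nullptr;
  std::string* _invokedynamic_adapter = nullptr;
 public:
  // Mirrors get_method_handle_adapter(): compute once, then reuse.
  std::string* method_handle_adapter() {
    if (_method_handle_adapter == nullptr)
      _method_handle_adapter = build_adapter(false);
    return _method_handle_adapter;
  }
  // Mirrors get_invokedynamic_adapter().
  std::string* invokedynamic_adapter() {
    if (_invokedynamic_adapter == nullptr)
      _invokedynamic_adapter = build_adapter(true);
    return _invokedynamic_adapter;
  }
};

int main() {
  AdapterCache c;
  c.method_handle_adapter();
  c.method_handle_adapter();   // cached: "building adapter" prints only once
  return 0;
}
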
--- a/src/share/vm/ci/ciObject.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/ci/ciObject.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -194,16 +194,26 @@
 // ciObject::should_be_constant()
 bool ciObject::should_be_constant() {
   if (ScavengeRootsInCode >= 2)  return true;  // force everybody to be a constant
-  if (!JavaObjectsInPerm && !is_null_object()) {
+  if (is_null_object()) return true;
+
+  ciEnv* env = CURRENT_ENV;
+  if (!JavaObjectsInPerm) {
     // We want Strings and Classes to be embeddable by default since
     // they used to be in the perm world.  Not all Strings used to be
     // embeddable but there's no easy way to distinguish the interned
     // from the regulars ones so just treat them all that way.
-    ciEnv* env = CURRENT_ENV;
     if (klass() == env->String_klass() || klass() == env->Class_klass()) {
       return true;
     }
   }
+  if (EnableInvokeDynamic &&
+      (klass()->is_subclass_of(env->MethodHandle_klass()) ||
+       klass()->is_subclass_of(env->CallSite_klass()))) {
+    assert(ScavengeRootsInCode >= 1, "must be");
+    // We want to treat these aggressively.
+    return true;
+  }
+
   return handle() == NULL || is_perm();
 }
 
--- a/src/share/vm/ci/ciObjectFactory.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/ci/ciObjectFactory.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -36,6 +36,9 @@
 // which ensures that for each oop, at most one ciObject is created.
 // This invariant allows efficient implementation of ciObject.
 class ciObjectFactory : public ResourceObj {
+  friend class VMStructs;
+  friend class ciEnv;
+
 private:
   static volatile bool _initialized;
   static GrowableArray<ciObject*>* _shared_ci_objects;
--- a/src/share/vm/ci/ciStreams.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/ci/ciStreams.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -129,7 +129,8 @@
   // Return current ByteCode and increment PC to next bytecode, skipping all
   // intermediate constants.  Returns EOBC at end.
   // Expected usage:
-  //     while( (bc = iter.next()) != EOBC() ) { ... }
+  //     ciBytecodeStream iter(m);
+  //     while (iter.next() != ciBytecodeStream::EOBC()) { ... }
   Bytecodes::Code next() {
     _bc_start = _pc;                        // Capture start of bc
     if( _pc >= _end ) return EOBC();        // End-Of-Bytecodes
--- a/src/share/vm/classfile/classFileParser.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/classfile/classFileParser.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -36,6 +36,7 @@
 #include "memory/oopFactory.hpp"
 #include "memory/universe.inline.hpp"
 #include "oops/constantPoolOop.hpp"
+#include "oops/fieldStreams.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/instanceMirrorKlass.hpp"
 #include "oops/klass.inline.hpp"
@@ -991,42 +992,98 @@
   STATIC_BYTE,          // Boolean, Byte, char
   STATIC_SHORT,         // shorts
   STATIC_WORD,          // ints
-  STATIC_DOUBLE,        // long or double
-  STATIC_ALIGNED_DOUBLE,// aligned long or double
+  STATIC_DOUBLE,        // aligned long or double
   NONSTATIC_OOP,
   NONSTATIC_BYTE,
   NONSTATIC_SHORT,
   NONSTATIC_WORD,
   NONSTATIC_DOUBLE,
-  NONSTATIC_ALIGNED_DOUBLE
+  MAX_FIELD_ALLOCATION_TYPE,
+  BAD_ALLOCATION_TYPE = -1
 };
 
-
-struct FieldAllocationCount {
-  unsigned int static_oop_count;
-  unsigned int static_byte_count;
-  unsigned int static_short_count;
-  unsigned int static_word_count;
-  unsigned int static_double_count;
-  unsigned int nonstatic_oop_count;
-  unsigned int nonstatic_byte_count;
-  unsigned int nonstatic_short_count;
-  unsigned int nonstatic_word_count;
-  unsigned int nonstatic_double_count;
+static FieldAllocationType _basic_type_to_atype[2 * (T_CONFLICT + 1)] = {
+  BAD_ALLOCATION_TYPE, // 0
+  BAD_ALLOCATION_TYPE, // 1
+  BAD_ALLOCATION_TYPE, // 2
+  BAD_ALLOCATION_TYPE, // 3
+  NONSTATIC_BYTE ,     // T_BOOLEAN  =  4,
+  NONSTATIC_SHORT,     // T_CHAR     =  5,
+  NONSTATIC_WORD,      // T_FLOAT    =  6,
+  NONSTATIC_DOUBLE,    // T_DOUBLE   =  7,
+  NONSTATIC_BYTE,      // T_BYTE     =  8,
+  NONSTATIC_SHORT,     // T_SHORT    =  9,
+  NONSTATIC_WORD,      // T_INT      = 10,
+  NONSTATIC_DOUBLE,    // T_LONG     = 11,
+  NONSTATIC_OOP,       // T_OBJECT   = 12,
+  NONSTATIC_OOP,       // T_ARRAY    = 13,
+  BAD_ALLOCATION_TYPE, // T_VOID     = 14,
+  BAD_ALLOCATION_TYPE, // T_ADDRESS  = 15,
+  BAD_ALLOCATION_TYPE, // T_NARROWOOP= 16,
+  BAD_ALLOCATION_TYPE, // T_CONFLICT = 17,
+  BAD_ALLOCATION_TYPE, // 0
+  BAD_ALLOCATION_TYPE, // 1
+  BAD_ALLOCATION_TYPE, // 2
+  BAD_ALLOCATION_TYPE, // 3
+  STATIC_BYTE ,        // T_BOOLEAN  =  4,
+  STATIC_SHORT,        // T_CHAR     =  5,
+  STATIC_WORD,         // T_FLOAT    =  6,
+  STATIC_DOUBLE,       // T_DOUBLE   =  7,
+  STATIC_BYTE,         // T_BYTE     =  8,
+  STATIC_SHORT,        // T_SHORT    =  9,
+  STATIC_WORD,         // T_INT      = 10,
+  STATIC_DOUBLE,       // T_LONG     = 11,
+  STATIC_OOP,          // T_OBJECT   = 12,
+  STATIC_OOP,          // T_ARRAY    = 13,
+  BAD_ALLOCATION_TYPE, // T_VOID     = 14,
+  BAD_ALLOCATION_TYPE, // T_ADDRESS  = 15,
+  BAD_ALLOCATION_TYPE, // T_NARROWOOP= 16,
+  BAD_ALLOCATION_TYPE, // T_CONFLICT = 17,
 };
 
-typeArrayHandle ClassFileParser::parse_fields(constantPoolHandle cp, bool is_interface,
-                                              struct FieldAllocationCount *fac,
-                                              objArrayHandle* fields_annotations, TRAPS) {
+static FieldAllocationType basic_type_to_atype(bool is_static, BasicType type) {
+  assert(type >= T_BOOLEAN && type < T_VOID, "only allowable values");
+  FieldAllocationType result = _basic_type_to_atype[type + (is_static ? (T_CONFLICT + 1) : 0)];
+  assert(result != BAD_ALLOCATION_TYPE, "bad type");
+  return result;
+}
+
+class FieldAllocationCount: public ResourceObj {
+ public:
+  unsigned int count[MAX_FIELD_ALLOCATION_TYPE];
+
+  FieldAllocationCount() {
+    for (int i = 0; i < MAX_FIELD_ALLOCATION_TYPE; i++) {
+      count[i] = 0;
+    }
+  }
+
+  FieldAllocationType update(bool is_static, BasicType type) {
+    FieldAllocationType atype = basic_type_to_atype(is_static, type);
+    count[atype]++;
+    return atype;
+  }
+};
+
+
+typeArrayHandle ClassFileParser::parse_fields(Symbol* class_name,
+                                              constantPoolHandle cp, bool is_interface,
+                                              FieldAllocationCount *fac,
+                                              objArrayHandle* fields_annotations,
+                                              int* java_fields_count_ptr, TRAPS) {
   ClassFileStream* cfs = stream();
   typeArrayHandle nullHandle;
   cfs->guarantee_more(2, CHECK_(nullHandle));  // length
   u2 length = cfs->get_u2_fast();
+  *java_fields_count_ptr = length;
+
+  int num_injected = 0;
+  InjectedField* injected = JavaClasses::get_injected(class_name, &num_injected);
+
   // Tuples of shorts [access, name index, sig index, initial value index, byte offset, generic signature index]
-  typeArrayOop new_fields = oopFactory::new_permanent_shortArray(length*instanceKlass::next_offset, CHECK_(nullHandle));
+  typeArrayOop new_fields = oopFactory::new_permanent_shortArray((length + num_injected) * FieldInfo::field_slots, CHECK_(nullHandle));
   typeArrayHandle fields(THREAD, new_fields);
 
-  int index = 0;
   typeArrayHandle field_annotations;
   for (int n = 0; n < length; n++) {
     cfs->guarantee_more(8, CHECK_(nullHandle));  // access_flags, name_index, descriptor_index, attributes_count
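
The old FieldAllocationCount struct with ten named counters is replaced above by a single count[] array indexed by FieldAllocationType, plus the _basic_type_to_atype lookup table. The table is laid out as two blocks of (T_CONFLICT + 1) entries, non-static types first and static types second, so a (is_static, BasicType) pair selects one slot. A small self-contained sketch of that indexing scheme follows; the enum values mirror the comments in the table above, but the types and the main() driver are simplified stand-ins, not the HotSpot definitions.

#include <cassert>
#include <cstdio>

// Simplified stand-ins; numeric values mirror the comments in the patch.
enum BasicType { T_BOOLEAN = 4, T_CHAR, T_FLOAT, T_DOUBLE, T_BYTE, T_SHORT,
                 T_INT, T_LONG, T_OBJECT, T_ARRAY, T_VOID, T_ADDRESS,
                 T_NARROWOOP, T_CONFLICT };
enum FieldAllocationType {
  STATIC_OOP, STATIC_BYTE, STATIC_SHORT, STATIC_WORD, STATIC_DOUBLE,
  NONSTATIC_OOP, NONSTATIC_BYTE, NONSTATIC_SHORT, NONSTATIC_WORD,
  NONSTATIC_DOUBLE, MAX_FIELD_ALLOCATION_TYPE
};

// Non-static entries occupy indices [0, T_CONFLICT]; static entries are the
// same BasicType shifted up by (T_CONFLICT + 1), as in _basic_type_to_atype.
static int atype_index(bool is_static, BasicType type) {
  assert(type >= T_BOOLEAN && type < T_VOID);   // only field-bearing types
  return type + (is_static ? (T_CONFLICT + 1) : 0);
}

int main() {
  // One counter per allocation type, like FieldAllocationCount::count[].
  unsigned int count[MAX_FIELD_ALLOCATION_TYPE] = {0};
  printf("static long    -> table slot %d\n", atype_index(true,  T_LONG)); // 29
  printf("non-static int -> table slot %d\n", atype_index(false, T_INT));  // 10
  count[STATIC_DOUBLE]++;    // what update(true, T_LONG) would bump
  count[NONSTATIC_WORD]++;   // what update(false, T_INT) would bump
  return 0;
}
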
@@ -1077,93 +1134,77 @@
       }
     }
 
-    fields->short_at_put(index++, access_flags.as_short());
-    fields->short_at_put(index++, name_index);
-    fields->short_at_put(index++, signature_index);
-    fields->short_at_put(index++, constantvalue_index);
+    FieldInfo* field = FieldInfo::from_field_array(fields(), n);
+    field->initialize(access_flags.as_short(),
+                      name_index,
+                      signature_index,
+                      constantvalue_index,
+                      generic_signature_index,
+                      0);
+
+    BasicType type = cp->basic_type_for_signature_at(signature_index);
 
     // Remember how many oops we encountered and compute allocation type
-    BasicType type = cp->basic_type_for_signature_at(signature_index);
-    FieldAllocationType atype;
-    if ( is_static ) {
-      switch ( type ) {
-        case  T_BOOLEAN:
-        case  T_BYTE:
-          fac->static_byte_count++;
-          atype = STATIC_BYTE;
-          break;
-        case  T_LONG:
-        case  T_DOUBLE:
-          if (Universe::field_type_should_be_aligned(type)) {
-            atype = STATIC_ALIGNED_DOUBLE;
-          } else {
-            atype = STATIC_DOUBLE;
-          }
-          fac->static_double_count++;
-          break;
-        case  T_CHAR:
-        case  T_SHORT:
-          fac->static_short_count++;
-          atype = STATIC_SHORT;
-          break;
-        case  T_FLOAT:
-        case  T_INT:
-          fac->static_word_count++;
-          atype = STATIC_WORD;
-          break;
-        case  T_ARRAY:
-        case  T_OBJECT:
-          fac->static_oop_count++;
-          atype = STATIC_OOP;
-          break;
-        case  T_ADDRESS:
-        case  T_VOID:
-        default:
-          assert(0, "bad field type");
-      }
-    } else {
-      switch ( type ) {
-        case  T_BOOLEAN:
-        case  T_BYTE:
-          fac->nonstatic_byte_count++;
-          atype = NONSTATIC_BYTE;
-          break;
-        case  T_LONG:
-        case  T_DOUBLE:
-          if (Universe::field_type_should_be_aligned(type)) {
-            atype = NONSTATIC_ALIGNED_DOUBLE;
-          } else {
-            atype = NONSTATIC_DOUBLE;
-          }
-          fac->nonstatic_double_count++;
-          break;
-        case  T_CHAR:
-        case  T_SHORT:
-          fac->nonstatic_short_count++;
-          atype = NONSTATIC_SHORT;
-          break;
-        case  T_FLOAT:
-        case  T_INT:
-          fac->nonstatic_word_count++;
-          atype = NONSTATIC_WORD;
-          break;
-        case  T_ARRAY:
-        case  T_OBJECT:
-          fac->nonstatic_oop_count++;
-          atype = NONSTATIC_OOP;
-          break;
-        case  T_ADDRESS:
-        case  T_VOID:
-        default:
-          assert(0, "bad field type");
-      }
-    }
+    FieldAllocationType atype = fac->update(is_static, type);
 
     // The correct offset is computed later (all oop fields will be located together)
     // We temporarily store the allocation type in the offset field
-    fields->short_at_put(index++, atype);
-    fields->short_at_put(index++, 0);  // Clear out high word of byte offset
-    fields->short_at_put(index++, generic_signature_index);
+    field->set_offset(atype);
+  }
+
+  if (num_injected != 0) {
+    int index = length;
+    for (int n = 0; n < num_injected; n++) {
+      // Check for duplicates
+      if (injected[n].may_be_java) {
+        Symbol* name      = injected[n].name();
+        Symbol* signature = injected[n].signature();
+        bool duplicate = false;
+        for (int i = 0; i < length; i++) {
+          FieldInfo* f = FieldInfo::from_field_array(fields(), i);
+          if (name      == cp->symbol_at(f->name_index()) &&
+              signature == cp->symbol_at(f->signature_index())) {
+            // Symbol is declared in Java so skip this one
+            duplicate = true;
+            break;
+          }
+        }
+        if (duplicate) {
+          // These will be removed from the field array at the end
+          continue;
+        }
+      }
+
+      // Injected field
+      FieldInfo* field = FieldInfo::from_field_array(fields(), index);
+      field->initialize(JVM_ACC_FIELD_INTERNAL,
+                        injected[n].name_index,
+                        injected[n].signature_index,
+                        0,
+                        0,
+                        0);
+
+      BasicType type = FieldType::basic_type(injected[n].signature());
+
+      // Remember how many oops we encountered and compute allocation type
+      FieldAllocationType atype = fac->update(false, type);
+
+      // The correct offset is computed later (all oop fields will be located together)
+      // We temporarily store the allocation type in the offset field
+      field->set_offset(atype);
+      index++;
+    }
+
+    if (index < length + num_injected) {
+      // sometimes injected fields already exist in the Java source so
+      // the fields array could be too long.  In that case trim the
+      // fields array.
+      new_fields = oopFactory::new_permanent_shortArray(index * FieldInfo::field_slots, CHECK_(nullHandle));
+      for (int i = 0; i < index * FieldInfo::field_slots; i++) {
+        new_fields->short_at_put(i, fields->short_at(i));
+      }
+      fields = new_fields;
+    }
   }
 
   if (_need_verify && length > 1) {
@@ -1175,11 +1216,9 @@
     bool dup = false;
     {
       debug_only(No_Safepoint_Verifier nsv;)
-      for (int i = 0; i < length*instanceKlass::next_offset; i += instanceKlass::next_offset) {
-        int name_index = fields->ushort_at(i + instanceKlass::name_index_offset);
-        Symbol* name = cp->symbol_at(name_index);
-        int sig_index = fields->ushort_at(i + instanceKlass::signature_index_offset);
-        Symbol* sig = cp->symbol_at(sig_index);
+      for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
+        Symbol* name = fs.name();
+        Symbol* sig = fs.signature();
         // If no duplicates, add name/signature in hashtable names_and_sigs.
         if (!put_after_lookup(name, sig, names_and_sigs)) {
           dup = true;
@@ -2592,227 +2631,6 @@
 }
 
 
-void ClassFileParser::java_lang_ref_Reference_fix_pre(typeArrayHandle* fields_ptr,
-  constantPoolHandle cp, FieldAllocationCount *fac_ptr, TRAPS) {
-  // This code is for compatibility with earlier jdk's that do not
-  // have the "discovered" field in java.lang.ref.Reference.  For 1.5
-  // the check for the "discovered" field should issue a warning if
-  // the field is not found.  For 1.6 this code should be issue a
-  // fatal error if the "discovered" field is not found.
-  //
-  // Increment fac.nonstatic_oop_count so that the start of the
-  // next type of non-static oops leaves room for the fake oop.
-  // Do not increment next_nonstatic_oop_offset so that the
-  // fake oop is place after the java.lang.ref.Reference oop
-  // fields.
-  //
-  // Check the fields in java.lang.ref.Reference for the "discovered"
-  // field.  If it is not present, artifically create a field for it.
-  // This allows this VM to run on early JDK where the field is not
-  // present.
-  int reference_sig_index = 0;
-  int reference_name_index = 0;
-  int reference_index = 0;
-  int extra = java_lang_ref_Reference::number_of_fake_oop_fields;
-  const int n = (*fields_ptr)()->length();
-  for (int i = 0; i < n; i += instanceKlass::next_offset ) {
-    int name_index =
-    (*fields_ptr)()->ushort_at(i + instanceKlass::name_index_offset);
-    int sig_index  =
-      (*fields_ptr)()->ushort_at(i + instanceKlass::signature_index_offset);
-    Symbol* f_name = cp->symbol_at(name_index);
-    Symbol* f_sig  = cp->symbol_at(sig_index);
-    if (f_sig == vmSymbols::reference_signature() && reference_index == 0) {
-      // Save the index for reference signature for later use.
-      // The fake discovered field does not entries in the
-      // constant pool so the index for its signature cannot
-      // be extracted from the constant pool.  It will need
-      // later, however.  It's signature is vmSymbols::reference_signature()
-      // so same an index for that signature.
-      reference_sig_index = sig_index;
-      reference_name_index = name_index;
-      reference_index = i;
-    }
-    if (f_name == vmSymbols::reference_discovered_name() &&
-      f_sig == vmSymbols::reference_signature()) {
-      // The values below are fake but will force extra
-      // non-static oop fields and a corresponding non-static
-      // oop map block to be allocated.
-      extra = 0;
-      break;
-    }
-  }
-  if (extra != 0) {
-    fac_ptr->nonstatic_oop_count += extra;
-    // Add the additional entry to "fields" so that the klass
-    // contains the "discoverd" field and the field will be initialized
-    // in instances of the object.
-    int fields_with_fix_length = (*fields_ptr)()->length() +
-      instanceKlass::next_offset;
-    typeArrayOop ff = oopFactory::new_permanent_shortArray(
-                                                fields_with_fix_length, CHECK);
-    typeArrayHandle fields_with_fix(THREAD, ff);
-
-    // Take everything from the original but the length.
-    for (int idx = 0; idx < (*fields_ptr)->length(); idx++) {
-      fields_with_fix->ushort_at_put(idx, (*fields_ptr)->ushort_at(idx));
-    }
-
-    // Add the fake field at the end.
-    int i = (*fields_ptr)->length();
-    // There is no name index for the fake "discovered" field nor
-    // signature but a signature is needed so that the field will
-    // be properly initialized.  Use one found for
-    // one of the other reference fields. Be sure the index for the
-    // name is 0.  In fieldDescriptor::initialize() the index of the
-    // name is checked.  That check is by passed for the last nonstatic
-    // oop field in a java.lang.ref.Reference which is assumed to be
-    // this artificial "discovered" field.  An assertion checks that
-    // the name index is 0.
-    assert(reference_index != 0, "Missing signature for reference");
-
-    int j;
-    for (j = 0; j < instanceKlass::next_offset; j++) {
-      fields_with_fix->ushort_at_put(i + j,
-        (*fields_ptr)->ushort_at(reference_index +j));
-    }
-    // Clear the public access flag and set the private access flag.
-    short flags;
-    flags =
-      fields_with_fix->ushort_at(i + instanceKlass::access_flags_offset);
-    assert(!(flags & JVM_RECOGNIZED_FIELD_MODIFIERS), "Unexpected access flags set");
-    flags = flags & (~JVM_ACC_PUBLIC);
-    flags = flags | JVM_ACC_PRIVATE;
-    AccessFlags access_flags;
-    access_flags.set_flags(flags);
-    assert(!access_flags.is_public(), "Failed to clear public flag");
-    assert(access_flags.is_private(), "Failed to set private flag");
-    fields_with_fix->ushort_at_put(i + instanceKlass::access_flags_offset,
-      flags);
-
-    assert(fields_with_fix->ushort_at(i + instanceKlass::name_index_offset)
-      == reference_name_index, "The fake reference name is incorrect");
-    assert(fields_with_fix->ushort_at(i + instanceKlass::signature_index_offset)
-      == reference_sig_index, "The fake reference signature is incorrect");
-    // The type of the field is stored in the low_offset entry during
-    // parsing.
-    assert(fields_with_fix->ushort_at(i + instanceKlass::low_offset) ==
-      NONSTATIC_OOP, "The fake reference type is incorrect");
-
-    // "fields" is allocated in the permanent generation.  Disgard
-    // it and let it be collected.
-    (*fields_ptr) = fields_with_fix;
-  }
-  return;
-}
-
-
-void ClassFileParser::java_lang_Class_fix_pre(int* nonstatic_field_size,
-                                              FieldAllocationCount *fac_ptr) {
-  // Add fake fields for java.lang.Class instances
-  //
-  // This is not particularly nice. We should consider adding a
-  // private transient object field at the Java level to
-  // java.lang.Class. Alternatively we could add a subclass of
-  // instanceKlass which provides an accessor and size computer for
-  // this field, but that appears to be more code than this hack.
-  //
-  // NOTE that we wedge these in at the beginning rather than the
-  // end of the object because the Class layout changed between JDK
-  // 1.3 and JDK 1.4 with the new reflection implementation; some
-  // nonstatic oop fields were added at the Java level. The offsets
-  // of these fake fields can't change between these two JDK
-  // versions because when the offsets are computed at bootstrap
-  // time we don't know yet which version of the JDK we're running in.
-
-  // The values below are fake but will force three non-static oop fields and
-  // a corresponding non-static oop map block to be allocated.
-  const int extra = java_lang_Class::number_of_fake_oop_fields;
-  fac_ptr->nonstatic_oop_count += extra;
-
-  // Reserve some leading space for fake ints
-  *nonstatic_field_size += align_size_up(java_lang_Class::hc_number_of_fake_int_fields * BytesPerInt, heapOopSize) / heapOopSize;
-}
-
-
-void ClassFileParser::java_lang_Class_fix_post(int* next_nonstatic_oop_offset_ptr) {
-  // Cause the extra fake fields in java.lang.Class to show up before
-  // the Java fields for layout compatibility between 1.3 and 1.4
-  // Incrementing next_nonstatic_oop_offset here advances the
-  // location where the real java fields are placed.
-  const int extra = java_lang_Class::number_of_fake_oop_fields;
-  (*next_nonstatic_oop_offset_ptr) += (extra * heapOopSize);
-}
-
-
-// Force MethodHandle.vmentry to be an unmanaged pointer.
-// There is no way for a classfile to express this, so we must help it.
-void ClassFileParser::java_lang_invoke_MethodHandle_fix_pre(constantPoolHandle cp,
-                                                    typeArrayHandle fields,
-                                                    FieldAllocationCount *fac_ptr,
-                                                    TRAPS) {
-  // Add fake fields for java.lang.invoke.MethodHandle instances
-  //
-  // This is not particularly nice, but since there is no way to express
-  // a native wordSize field in Java, we must do it at this level.
-
-  if (!EnableInvokeDynamic)  return;
-
-  int word_sig_index = 0;
-  const int cp_size = cp->length();
-  for (int index = 1; index < cp_size; index++) {
-    if (cp->tag_at(index).is_utf8() &&
-        cp->symbol_at(index) == vmSymbols::machine_word_signature()) {
-      word_sig_index = index;
-      break;
-    }
-  }
-
-  if (word_sig_index == 0)
-    THROW_MSG(vmSymbols::java_lang_VirtualMachineError(),
-              "missing I or J signature (for vmentry) in java.lang.invoke.MethodHandle");
-
-  // Find vmentry field and change the signature.
-  bool found_vmentry = false;
-  for (int i = 0; i < fields->length(); i += instanceKlass::next_offset) {
-    int name_index = fields->ushort_at(i + instanceKlass::name_index_offset);
-    int sig_index  = fields->ushort_at(i + instanceKlass::signature_index_offset);
-    int acc_flags  = fields->ushort_at(i + instanceKlass::access_flags_offset);
-    Symbol* f_name = cp->symbol_at(name_index);
-    Symbol* f_sig  = cp->symbol_at(sig_index);
-    if (f_name == vmSymbols::vmentry_name() && (acc_flags & JVM_ACC_STATIC) == 0) {
-      if (f_sig == vmSymbols::machine_word_signature()) {
-        // If the signature of vmentry is already changed, we're done.
-        found_vmentry = true;
-        break;
-      }
-      else if (f_sig == vmSymbols::byte_signature()) {
-        // Adjust the field type from byte to an unmanaged pointer.
-        assert(fac_ptr->nonstatic_byte_count > 0, "");
-        fac_ptr->nonstatic_byte_count -= 1;
-
-        fields->ushort_at_put(i + instanceKlass::signature_index_offset, word_sig_index);
-        assert(wordSize == longSize || wordSize == jintSize, "ILP32 or LP64");
-        if (wordSize == longSize)  fac_ptr->nonstatic_double_count += 1;
-        else                       fac_ptr->nonstatic_word_count   += 1;
-
-        FieldAllocationType atype = (FieldAllocationType) fields->ushort_at(i + instanceKlass::low_offset);
-        assert(atype == NONSTATIC_BYTE, "");
-        FieldAllocationType new_atype = (wordSize == longSize) ? NONSTATIC_DOUBLE : NONSTATIC_WORD;
-        fields->ushort_at_put(i + instanceKlass::low_offset, new_atype);
-
-        found_vmentry = true;
-        break;
-      }
-    }
-  }
-
-  if (!found_vmentry)
-    THROW_MSG(vmSymbols::java_lang_VirtualMachineError(),
-              "missing vmentry byte field in java.lang.invoke.MethodHandle");
-}
-
-
 instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
                                                     Handle class_loader,
                                                     Handle protection_domain,
@@ -3025,10 +2843,13 @@
       local_interfaces = parse_interfaces(cp, itfs_len, class_loader, protection_domain, _class_name, CHECK_(nullHandle));
     }
 
+    int java_fields_count = 0;
     // Fields (offsets are filled in later)
-    struct FieldAllocationCount fac = {0,0,0,0,0,0,0,0,0,0};
+    FieldAllocationCount fac;
     objArrayHandle fields_annotations;
-    typeArrayHandle fields = parse_fields(cp, access_flags.is_interface(), &fac, &fields_annotations, CHECK_(nullHandle));
+    typeArrayHandle fields = parse_fields(class_name, cp, access_flags.is_interface(), &fac, &fields_annotations,
+                                          &java_fields_count,
+                                          CHECK_(nullHandle));
     // Methods
     bool has_final_method = false;
     AccessFlags promoted_flags;
@@ -3146,51 +2967,33 @@
     // Calculate the starting byte offsets
     next_static_oop_offset      = instanceMirrorKlass::offset_of_static_fields();
     next_static_double_offset   = next_static_oop_offset +
-                                  (fac.static_oop_count * heapOopSize);
-    if ( fac.static_double_count &&
+                                  (fac.count[STATIC_OOP] * heapOopSize);
+    if ( fac.count[STATIC_DOUBLE] &&
          (Universe::field_type_should_be_aligned(T_DOUBLE) ||
           Universe::field_type_should_be_aligned(T_LONG)) ) {
       next_static_double_offset = align_size_up(next_static_double_offset, BytesPerLong);
     }
 
     next_static_word_offset     = next_static_double_offset +
-                                  (fac.static_double_count * BytesPerLong);
+                                  (fac.count[STATIC_DOUBLE] * BytesPerLong);
     next_static_short_offset    = next_static_word_offset +
-                                  (fac.static_word_count * BytesPerInt);
+                                  (fac.count[STATIC_WORD] * BytesPerInt);
     next_static_byte_offset     = next_static_short_offset +
-                                  (fac.static_short_count * BytesPerShort);
+                                  (fac.count[STATIC_SHORT] * BytesPerShort);
     next_static_type_offset     = align_size_up((next_static_byte_offset +
-                                  fac.static_byte_count ), wordSize );
+                                  fac.count[STATIC_BYTE] ), wordSize );
     static_field_size           = (next_static_type_offset -
                                   next_static_oop_offset) / wordSize;
 
-    // Add fake fields for java.lang.Class instances (also see below)
-    if (class_name == vmSymbols::java_lang_Class() && class_loader.is_null()) {
-      java_lang_Class_fix_pre(&nonstatic_field_size, &fac);
-    }
-
     first_nonstatic_field_offset = instanceOopDesc::base_offset_in_bytes() +
                                    nonstatic_field_size * heapOopSize;
     next_nonstatic_field_offset = first_nonstatic_field_offset;
 
-    // adjust the vmentry field declaration in java.lang.invoke.MethodHandle
-    if (EnableInvokeDynamic && class_name == vmSymbols::java_lang_invoke_MethodHandle() && class_loader.is_null()) {
-      java_lang_invoke_MethodHandle_fix_pre(cp, fields, &fac, CHECK_(nullHandle));
-    }
-
-    // Add a fake "discovered" field if it is not present
-    // for compatibility with earlier jdk's.
-    if (class_name == vmSymbols::java_lang_ref_Reference()
-      && class_loader.is_null()) {
-      java_lang_ref_Reference_fix_pre(&fields, cp, &fac, CHECK_(nullHandle));
-    }
-    // end of "discovered" field compactibility fix
-
-    unsigned int nonstatic_double_count = fac.nonstatic_double_count;
-    unsigned int nonstatic_word_count   = fac.nonstatic_word_count;
-    unsigned int nonstatic_short_count  = fac.nonstatic_short_count;
-    unsigned int nonstatic_byte_count   = fac.nonstatic_byte_count;
-    unsigned int nonstatic_oop_count    = fac.nonstatic_oop_count;
+    unsigned int nonstatic_double_count = fac.count[NONSTATIC_DOUBLE];
+    unsigned int nonstatic_word_count   = fac.count[NONSTATIC_WORD];
+    unsigned int nonstatic_short_count  = fac.count[NONSTATIC_SHORT];
+    unsigned int nonstatic_byte_count   = fac.count[NONSTATIC_BYTE];
+    unsigned int nonstatic_oop_count    = fac.count[NONSTATIC_OOP];
 
     bool super_has_nonstatic_fields =
             (super_klass() != NULL && super_klass->has_nonstatic_fields());
@@ -3210,20 +3013,7 @@
     nonstatic_oop_counts  = NEW_RESOURCE_ARRAY_IN_THREAD(
               THREAD, unsigned int, nonstatic_oop_count + 1);
 
-    // Add fake fields for java.lang.Class instances (also see above).
-    // FieldsAllocationStyle and CompactFields values will be reset to default.
-    if(class_name == vmSymbols::java_lang_Class() && class_loader.is_null()) {
-      java_lang_Class_fix_post(&next_nonstatic_field_offset);
-      nonstatic_oop_offsets[0] = first_nonstatic_field_offset;
-      const uint fake_oop_count = (next_nonstatic_field_offset -
-                                   first_nonstatic_field_offset) / heapOopSize;
-      nonstatic_oop_counts[0] = fake_oop_count;
-      nonstatic_oop_map_count = 1;
-      nonstatic_oop_count -= fake_oop_count;
-      first_nonstatic_oop_offset = first_nonstatic_field_offset;
-    } else {
-      first_nonstatic_oop_offset = 0; // will be set for first oop field
-    }
+    first_nonstatic_oop_offset = 0; // will be set for first oop field
 
 #ifndef PRODUCT
     if( PrintCompactFieldsSavings ) {
@@ -3287,9 +3077,9 @@
       // Fields allocation: oops fields in super and sub classes are together.
       if( nonstatic_field_size > 0 && super_klass() != NULL &&
           super_klass->nonstatic_oop_map_size() > 0 ) {
-        int map_size = super_klass->nonstatic_oop_map_size();
+        int map_count = super_klass->nonstatic_oop_map_count();
         OopMapBlock* first_map = super_klass->start_of_nonstatic_oop_maps();
-        OopMapBlock* last_map = first_map + map_size - 1;
+        OopMapBlock* last_map = first_map + map_count - 1;
         int next_offset = last_map->offset() + (last_map->count() * heapOopSize);
         if (next_offset == next_nonstatic_field_offset) {
           allocation_style = 0;   // allocate oops first
@@ -3378,10 +3168,9 @@
     // Iterate over fields again and compute correct offsets.
     // The field allocation type was temporarily stored in the offset slot.
     // oop fields are located before non-oop fields (static and non-static).
-    int len = fields->length();
-    for (int i = 0; i < len; i += instanceKlass::next_offset) {
+    for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
       int real_offset;
-      FieldAllocationType atype = (FieldAllocationType) fields->ushort_at(i + instanceKlass::low_offset);
+      FieldAllocationType atype = (FieldAllocationType) fs.offset();
       switch (atype) {
         case STATIC_OOP:
           real_offset = next_static_oop_offset;
@@ -3399,7 +3188,6 @@
           real_offset = next_static_word_offset;
           next_static_word_offset += BytesPerInt;
           break;
-        case STATIC_ALIGNED_DOUBLE:
         case STATIC_DOUBLE:
           real_offset = next_static_double_offset;
           next_static_double_offset += BytesPerLong;
@@ -3461,7 +3249,6 @@
             next_nonstatic_word_offset += BytesPerInt;
           }
           break;
-        case NONSTATIC_ALIGNED_DOUBLE:
         case NONSTATIC_DOUBLE:
           real_offset = next_nonstatic_double_offset;
           next_nonstatic_double_offset += BytesPerLong;
@@ -3469,8 +3256,7 @@
         default:
           ShouldNotReachHere();
       }
-      fields->short_at_put(i + instanceKlass::low_offset,  extract_low_short_from_int(real_offset));
-      fields->short_at_put(i + instanceKlass::high_offset, extract_high_short_from_int(real_offset));
+      fs.set_offset(real_offset);
     }
 
     // Size of instances
@@ -3517,12 +3303,12 @@
     this_klass->set_class_loader(class_loader());
     this_klass->set_nonstatic_field_size(nonstatic_field_size);
     this_klass->set_has_nonstatic_fields(has_nonstatic_fields);
-    this_klass->set_static_oop_field_count(fac.static_oop_count);
+    this_klass->set_static_oop_field_count(fac.count[STATIC_OOP]);
     cp->set_pool_holder(this_klass());
     error_handler.set_in_error(false);   // turn off error handler for cp
     this_klass->set_constants(cp());
     this_klass->set_local_interfaces(local_interfaces());
-    this_klass->set_fields(fields());
+    this_klass->set_fields(fields(), java_fields_count);
     this_klass->set_methods(methods());
     if (has_final_method) {
       this_klass->set_has_final_method();
--- a/src/share/vm/classfile/classFileParser.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/classfile/classFileParser.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -33,6 +33,9 @@
 #include "utilities/accessFlags.hpp"
 
 class TempNewSymbol;
+class FieldAllocationCount;
+
+
 // Parser for for .class files
 //
 // The bytes describing the class file structure is read from a Stream object
@@ -84,9 +87,11 @@
                               bool* is_synthetic_addr,
                               u2* generic_signature_index_addr,
                               typeArrayHandle* field_annotations, TRAPS);
-  typeArrayHandle parse_fields(constantPoolHandle cp, bool is_interface,
-                               struct FieldAllocationCount *fac,
-                               objArrayHandle* fields_annotations, TRAPS);
+  typeArrayHandle parse_fields(Symbol* class_name,
+                               constantPoolHandle cp, bool is_interface,
+                               FieldAllocationCount *fac,
+                               objArrayHandle* fields_annotations,
+                               int* java_fields_count_ptr, TRAPS);
 
   // Method parsing
   methodHandle parse_method(constantPoolHandle cp, bool is_interface,
@@ -150,25 +155,6 @@
   objArrayHandle compute_transitive_interfaces(instanceKlassHandle super,
                                                objArrayHandle local_ifs, TRAPS);
 
-  // Special handling for certain classes.
-  // Add the "discovered" field to java.lang.ref.Reference if
-  // it does not exist.
-  void java_lang_ref_Reference_fix_pre(typeArrayHandle* fields_ptr,
-                                       constantPoolHandle cp,
-                                       FieldAllocationCount *fac_ptr, TRAPS);
-  // Adjust the field allocation counts for java.lang.Class to add
-  // fake fields.
-  void java_lang_Class_fix_pre(int* nonstatic_field_size,
-                               FieldAllocationCount *fac_ptr);
-  // Adjust the next_nonstatic_oop_offset to place the fake fields
-  // before any Java fields.
-  void java_lang_Class_fix_post(int* next_nonstatic_oop_offset);
-  // Adjust the field allocation counts for java.lang.invoke.MethodHandle to add
-  // a fake address (void*) field.
-  void java_lang_invoke_MethodHandle_fix_pre(constantPoolHandle cp,
-                                     typeArrayHandle fields,
-                                     FieldAllocationCount *fac_ptr, TRAPS);
-
   // Format checker methods
   void classfile_parse_error(const char* msg, TRAPS);
   void classfile_parse_error(const char* msg, int index, TRAPS);
--- a/src/share/vm/classfile/classLoader.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/classfile/classLoader.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1350,13 +1350,13 @@
                 _codecache_sweep_counter = 0;
               }
               // Force compilation
-              CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_initial_compile,
+              CompileBroker::compile_method(m, InvocationEntryBci, CompilationPolicy::policy()->initial_compile_level(),
                                             methodHandle(), 0, "CTW", THREAD);
               if (HAS_PENDING_EXCEPTION) {
                 clear_pending_exception_if_not_oom(CHECK);
                 tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_counter, m->name()->as_C_string());
               }
-              if (TieredCompilation) {
+              if (TieredCompilation && TieredStopAtLevel >= CompLevel_full_optimization) {
                 // Clobber the first compile and force second tier compilation
                 nmethod* nm = m->code();
                 if (nm != NULL) {
--- a/src/share/vm/classfile/javaClasses.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/classfile/javaClasses.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -28,10 +28,12 @@
 #include "classfile/vmSymbols.hpp"
 #include "code/debugInfo.hpp"
 #include "code/pcDesc.hpp"
+#include "compiler/compilerOracle.hpp"
 #include "interpreter/interpreter.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.inline.hpp"
+#include "oops/fieldStreams.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/instanceMirrorKlass.hpp"
 #include "oops/klass.hpp"
@@ -57,6 +59,49 @@
 # include "thread_windows.inline.hpp"
 #endif
 
+#define INJECTED_FIELD_COMPUTE_OFFSET(klass, name, signature, may_be_java)    \
+  klass::_##name##_offset = JavaClasses::compute_injected_offset(JavaClasses::klass##_##name##_enum);
+
+#define DECLARE_INJECTED_FIELD(klass, name, signature, may_be_java)           \
+  { SystemDictionary::WK_KLASS_ENUM_NAME(klass), vmSymbols::VM_SYMBOL_ENUM_NAME(name##_name), vmSymbols::VM_SYMBOL_ENUM_NAME(signature), may_be_java },
+
+InjectedField JavaClasses::_injected_fields[] = {
+  ALL_INJECTED_FIELDS(DECLARE_INJECTED_FIELD)
+};
+
+int JavaClasses::compute_injected_offset(InjectedFieldID id) {
+  return _injected_fields[id].compute_offset();
+}
+
+
+InjectedField* JavaClasses::get_injected(Symbol* class_name, int* field_count) {
+  *field_count = 0;
+
+  vmSymbols::SID sid = vmSymbols::find_sid(class_name);
+  if (sid == vmSymbols::NO_SID) {
+    // Only well known classes can inject fields
+    return NULL;
+  }
+
+  int count = 0;
+  int start = -1;
+
+#define LOOKUP_INJECTED_FIELD(klass, name, signature, may_be_java) \
+  if (sid == vmSymbols::VM_SYMBOL_ENUM_NAME(klass)) {              \
+    count++;                                                       \
+    if (start == -1) start = klass##_##name##_enum;                \
+  }
+  ALL_INJECTED_FIELDS(LOOKUP_INJECTED_FIELD);
+#undef LOOKUP_INJECTED_FIELD
+
+  if (start != -1) {
+    *field_count = count;
+    return _injected_fields + start;
+  }
+  return NULL;
+}
+
+
 static bool find_field(instanceKlass* ik,
                        Symbol* name_symbol, Symbol* signature_symbol,
                        fieldDescriptor* fd,
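
The injected-field machinery above hangs off an X-macro list: ALL_INJECTED_FIELDS (defined in javaClasses.hpp, not shown in this hunk) names each (klass, field, signature, may_be_java) tuple once, and every consumer re-expands it with its own per-entry macro: DECLARE_INJECTED_FIELD fills _injected_fields, and LOOKUP_INJECTED_FIELD finds the entries for one class. A minimal sketch of that idiom with made-up entries; the class and field names and the main() driver below are illustrative, not the real HotSpot list.

#include <cstdio>
#include <cstring>

// Hypothetical list of injected fields: (klass, field, signature).
#define ALL_INJECTED_FIELDS(macro)                       \
  macro(java_lang_Class,  klass,       "J")              \
  macro(java_lang_Class,  oop_size,    "I")              \
  macro(java_lang_String, hash_cache,  "I")

// Expansion 1: an enum that gives every entry a stable index.
#define FIELD_ENUM(klass, name, sig) klass##_##name##_enum,
enum InjectedFieldID { ALL_INJECTED_FIELDS(FIELD_ENUM) NUM_INJECTED_FIELDS };
#undef FIELD_ENUM

// Expansion 2: a table describing each entry.
struct InjectedField { const char* klass; const char* name; const char* sig; };
#define FIELD_DESC(klass, name, sig) { #klass, #name, sig },
static InjectedField injected_fields[] = { ALL_INJECTED_FIELDS(FIELD_DESC) };
#undef FIELD_DESC

// Expansion 3: count the entries belonging to one class, like get_injected().
static int count_for_class(const char* klass) {
  int n = 0;
#define FIELD_COUNT(k, name, sig) if (strcmp(klass, #k) == 0) n++;
  ALL_INJECTED_FIELDS(FIELD_COUNT)
#undef FIELD_COUNT
  return n;
}

int main() {
  printf("%d injected fields total, %d for java_lang_Class\n",
         (int)NUM_INJECTED_FIELDS, count_for_class("java_lang_Class"));
  return 0;
}
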
@@ -427,24 +472,19 @@
 }
 
 
-// During bootstrap, java.lang.Class wasn't loaded so static field
-// offsets were computed without the size added it.  Go back and
-// update all the static field offsets to included the size.
-static void fixup_static_field(fieldDescriptor* fd, TRAPS) {
-  if (fd->is_static()) {
-    int real_offset = fd->offset() + instanceMirrorKlass::offset_of_static_fields();
-    typeArrayOop fields = instanceKlass::cast(fd->field_holder())->fields();
-    fields->short_at_put(fd->index() + instanceKlass::low_offset,  extract_low_short_from_int(real_offset));
-    fields->short_at_put(fd->index() + instanceKlass::high_offset, extract_high_short_from_int(real_offset));
-  }
-}
-
 void java_lang_Class::fixup_mirror(KlassHandle k, TRAPS) {
   assert(instanceMirrorKlass::offset_of_static_fields() != 0, "must have been computed already");
 
   if (k->oop_is_instance()) {
-    // Fixup the offsets
-    instanceKlass::cast(k())->do_local_static_fields(&fixup_static_field, CHECK);
+    // During bootstrap, java.lang.Class wasn't loaded so static field
+    // offsets were computed without the size added to them.  Go back and
+    // update all the static field offsets to include the size.
+    for (JavaFieldStream fs(instanceKlass::cast(k())); !fs.done(); fs.next()) {
+      if (fs.access_flags().is_static()) {
+        int real_offset = fs.offset() + instanceMirrorKlass::offset_of_static_fields();
+        fs.set_offset(real_offset);
+      }
+    }
   }
   create_mirror(k, CHECK);
 }
@@ -461,7 +501,7 @@
     // Allocate mirror (java.lang.Class instance)
     Handle mirror = instanceMirrorKlass::cast(SystemDictionary::Class_klass())->allocate_instance(k, CHECK_0);
     // Setup indirections
-    mirror->obj_field_put(klass_offset,  k());
+    mirror->obj_field_put(_klass_offset,  k());
     k->set_java_mirror(mirror());
 
     instanceMirrorKlass* mk = instanceMirrorKlass::cast(mirror->klass());
@@ -503,25 +543,22 @@
 
 
 int  java_lang_Class::oop_size(oop java_class) {
-  assert(oop_size_offset != 0, "must be set");
-  return java_class->int_field(oop_size_offset);
+  assert(_oop_size_offset != 0, "must be set");
+  return java_class->int_field(_oop_size_offset);
 }
 void java_lang_Class::set_oop_size(oop java_class, int size) {
-  assert(oop_size_offset != 0, "must be set");
-  java_class->int_field_put(oop_size_offset, size);
+  assert(_oop_size_offset != 0, "must be set");
+  java_class->int_field_put(_oop_size_offset, size);
 }
 int  java_lang_Class::static_oop_field_count(oop java_class) {
-  assert(static_oop_field_count_offset != 0, "must be set");
-  return java_class->int_field(static_oop_field_count_offset);
+  assert(_static_oop_field_count_offset != 0, "must be set");
+  return java_class->int_field(_static_oop_field_count_offset);
 }
 void java_lang_Class::set_static_oop_field_count(oop java_class, int size) {
-  assert(static_oop_field_count_offset != 0, "must be set");
-  java_class->int_field_put(static_oop_field_count_offset, size);
+  assert(_static_oop_field_count_offset != 0, "must be set");
+  java_class->int_field_put(_static_oop_field_count_offset, size);
 }
 
-
-
-
 oop java_lang_Class::create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS) {
   // This should be improved by adding a field at the Java level or by
   // introducing a new VM klass (see comment in ClassFileParser)
@@ -541,7 +578,7 @@
 klassOop java_lang_Class::as_klassOop(oop java_class) {
   //%note memory_2
   assert(java_lang_Class::is_instance(java_class), "must be a Class object");
-  klassOop k = klassOop(java_class->obj_field(klass_offset));
+  klassOop k = klassOop(java_class->obj_field(_klass_offset));
   assert(k == NULL || k->is_klass(), "type check");
   return k;
 }
@@ -597,7 +634,7 @@
 
 
 klassOop java_lang_Class::array_klass(oop java_class) {
-  klassOop k = klassOop(java_class->obj_field(array_klass_offset));
+  klassOop k = klassOop(java_class->obj_field(_array_klass_offset));
   assert(k == NULL || k->is_klass() && Klass::cast(k)->oop_is_javaArray(), "should be array klass");
   return k;
 }
@@ -605,12 +642,12 @@
 
 void java_lang_Class::set_array_klass(oop java_class, klassOop klass) {
   assert(klass->is_klass() && Klass::cast(klass)->oop_is_javaArray(), "should be array klass");
-  java_class->obj_field_put(array_klass_offset, klass);
+  java_class->obj_field_put(_array_klass_offset, klass);
 }
 
 
 methodOop java_lang_Class::resolved_constructor(oop java_class) {
-  oop constructor = java_class->obj_field(resolved_constructor_offset);
+  oop constructor = java_class->obj_field(_resolved_constructor_offset);
   assert(constructor == NULL || constructor->is_method(), "should be method");
   return methodOop(constructor);
 }
@@ -618,21 +655,21 @@
 
 void java_lang_Class::set_resolved_constructor(oop java_class, methodOop constructor) {
   assert(constructor->is_method(), "should be method");
-  java_class->obj_field_put(resolved_constructor_offset, constructor);
+  java_class->obj_field_put(_resolved_constructor_offset, constructor);
 }
 
 
 bool java_lang_Class::is_primitive(oop java_class) {
   // should assert:
   //assert(java_lang_Class::is_instance(java_class), "must be a Class object");
-  klassOop k = klassOop(java_class->obj_field(klass_offset));
+  klassOop k = klassOop(java_class->obj_field(_klass_offset));
   return k == NULL;
 }
 
 
 BasicType java_lang_Class::primitive_type(oop java_class) {
   assert(java_lang_Class::is_primitive(java_class), "just checking");
-  klassOop ak = klassOop(java_class->obj_field(array_klass_offset));
+  klassOop ak = klassOop(java_class->obj_field(_array_klass_offset));
   BasicType type = T_VOID;
   if (ak != NULL) {
     // Note: create_basic_type_mirror above initializes ak to a non-null value.
@@ -667,34 +704,18 @@
 
 bool java_lang_Class::offsets_computed = false;
 int  java_lang_Class::classRedefinedCount_offset = -1;
-int  java_lang_Class::parallelCapable_offset = -1;
 
 void java_lang_Class::compute_offsets() {
   assert(!offsets_computed, "offsets should be initialized only once");
   offsets_computed = true;
 
-  klassOop k = SystemDictionary::Class_klass();
+  klassOop klass_oop = SystemDictionary::Class_klass();
   // The classRedefinedCount field is only present starting in 1.5,
   // so don't go fatal.
   compute_optional_offset(classRedefinedCount_offset,
-    k, vmSymbols::classRedefinedCount_name(), vmSymbols::int_signature());
-
-  // The field indicating parallelCapable (parallelLockMap) is only present starting in 7,
-  klassOop k1 = SystemDictionary::ClassLoader_klass();
-  compute_optional_offset(parallelCapable_offset,
-    k1, vmSymbols::parallelCapable_name(), vmSymbols::concurrenthashmap_signature());
-}
-
-// For class loader classes, parallelCapable defined
-// based on non-null field
-// Written to by java.lang.ClassLoader, vm only reads this field, doesn't set it
-bool java_lang_Class::parallelCapable(oop class_loader) {
-  if (!JDK_Version::is_gte_jdk17x_version()
-     || parallelCapable_offset == -1) {
-     // Default for backward compatibility is false
-     return false;
-  }
-  return (class_loader->obj_field(parallelCapable_offset) != NULL);
+                          klass_oop, vmSymbols::classRedefinedCount_name(), vmSymbols::int_signature());
+
+  CLASS_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET);
 }
 
 int java_lang_Class::classRedefinedCount(oop the_class_mirror) {
@@ -1019,6 +1040,16 @@
   compute_offset(_ngroups_offset,     k, vmSymbols::ngroups_name(),     vmSymbols::int_signature());
 }
 
+oop java_lang_Throwable::unassigned_stacktrace() {
+  instanceKlass* ik = instanceKlass::cast(SystemDictionary::Throwable_klass());
+  address addr = ik->static_field_addr(static_unassigned_stacktrace_offset);
+  if (UseCompressedOops) {
+    return oopDesc::load_decode_heap_oop((narrowOop *)addr);
+  } else {
+    return oopDesc::load_decode_heap_oop((oop*)addr);
+  }
+}
+
 oop java_lang_Throwable::backtrace(oop throwable) {
   return throwable->obj_field_acquire(backtrace_offset);
 }
@@ -1044,9 +1075,13 @@
 }
 
 
+void java_lang_Throwable::set_stacktrace(oop throwable, oop st_element_array) {
+  throwable->obj_field_put(stackTrace_offset, st_element_array);
+}
+
 void java_lang_Throwable::clear_stacktrace(oop throwable) {
   assert(JDK_Version::is_gte_jdk14x_version(), "should only be called in >= 1.4");
-  throwable->obj_field_put(stackTrace_offset, NULL);
+  set_stacktrace(throwable, NULL);
 }
 
 
@@ -1258,7 +1293,6 @@
   objArrayOop     _methods;
   typeArrayOop    _bcis;
   int             _index;
-  bool            _dirty;
   No_Safepoint_Verifier _nsv;
 
  public:
@@ -1272,37 +1306,13 @@
   };
 
   // constructor for new backtrace
-  BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL), _dirty(false) {
+  BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL) {
     expand(CHECK);
     _backtrace = _head;
     _index = 0;
   }
 
-  void flush() {
-    // The following appears to have been an optimization to save from
-    // doing a barrier for each individual store into the _methods array,
-    // but rather to do it for the entire array after the series of writes.
-    // That optimization seems to have been lost when compressed oops was
-    // implemented. However, the extra card-marks below was left in place,
-    // but is now redundant because the individual stores into the
-    // _methods array already execute the barrier code. CR 6918185 has
-    // been filed so the original code may be restored by deferring the
-    // barriers until after the entire sequence of stores, thus re-enabling
-    // the intent of the original optimization. In the meantime the redundant
-    // card mark below is now disabled.
-    if (_dirty && _methods != NULL) {
-#if 0
-      BarrierSet* bs = Universe::heap()->barrier_set();
-      assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
-      bs->write_ref_array((HeapWord*)_methods->base(), _methods->length());
-#endif
-      _dirty = false;
-    }
-  }
-
   void expand(TRAPS) {
-    flush();
-
     objArrayHandle old_head(THREAD, _head);
     Pause_No_Safepoint_Verifier pnsv(&_nsv);
 
@@ -1328,7 +1338,6 @@
   }
 
   oop backtrace() {
-    flush();
     return _backtrace();
   }
 
@@ -1342,7 +1351,6 @@
     _methods->obj_at_put(_index, method);
     _bcis->ushort_at_put(_index, bci);
     _index++;
-    _dirty = true;
   }
 
   methodOop current_method() {
@@ -1367,6 +1375,7 @@
   if (JDK_Version::is_gte_jdk14x_version()) {
     // New since 1.4, clear lazily constructed Java level stacktrace if
     // refilling occurs
+    // This is unnecessary in 1.7+ but harmless
     clear_stacktrace(throwable());
   }
 
@@ -1568,6 +1577,15 @@
     // Bail-out for deep stacks
     if (chunk_count >= max_chunks) break;
   }
+
+  // For Java 7+ we support the Throwable immutability protocol defined in Java 7. This support
+  // was missing in 7u0, so 7u0 carries a workaround in the Throwable class. That workaround
+  // can be removed in a JDK built against this JVM version.
+  if (JDK_Version::is_gte_jdk17x_version()) {
+      java_lang_Throwable::set_stacktrace(throwable(), java_lang_Throwable::unassigned_stacktrace());
+      assert(java_lang_Throwable::unassigned_stacktrace() != NULL, "not initialized");
+  }
+
 }
 
 
@@ -2310,7 +2328,6 @@
 int java_lang_invoke_MethodHandle::_type_offset;
 int java_lang_invoke_MethodHandle::_vmtarget_offset;
 int java_lang_invoke_MethodHandle::_vmentry_offset;
-int java_lang_invoke_MethodHandle::_vmslots_offset;
 
 int java_lang_invoke_MemberName::_clazz_offset;
 int java_lang_invoke_MemberName::_name_offset;
@@ -2326,36 +2343,33 @@
 
 int java_lang_invoke_AdapterMethodHandle::_conversion_offset;
 
+int java_lang_invoke_CountingMethodHandle::_vmcount_offset;
+
 void java_lang_invoke_MethodHandle::compute_offsets() {
-  klassOop k = SystemDictionary::MethodHandle_klass();
-  if (k != NULL && EnableInvokeDynamic) {
+  klassOop klass_oop = SystemDictionary::MethodHandle_klass();
+  if (klass_oop != NULL && EnableInvokeDynamic) {
     bool allow_super = false;
-    compute_offset(_type_offset,      k, vmSymbols::type_name(),      vmSymbols::java_lang_invoke_MethodType_signature(), allow_super);
-    compute_offset(_vmtarget_offset,  k, vmSymbols::vmtarget_name(),  vmSymbols::object_signature(),                      allow_super);
-    compute_offset(_vmentry_offset,   k, vmSymbols::vmentry_name(),   vmSymbols::machine_word_signature(),                allow_super);
-
-    // Note:  MH.vmslots (if it is present) is a hoisted copy of MH.type.form.vmslots.
-    // It is optional pending experiments to keep or toss.
-    compute_optional_offset(_vmslots_offset, k, vmSymbols::vmslots_name(), vmSymbols::int_signature(), allow_super);
+    compute_offset(_type_offset,      klass_oop, vmSymbols::type_name(),      vmSymbols::java_lang_invoke_MethodType_signature(), allow_super);
+    METHODHANDLE_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET);
   }
 }
 
 void java_lang_invoke_MemberName::compute_offsets() {
-  klassOop k = SystemDictionary::MemberName_klass();
-  if (k != NULL && EnableInvokeDynamic) {
-    compute_offset(_clazz_offset,     k, vmSymbols::clazz_name(),     vmSymbols::class_signature());
-    compute_offset(_name_offset,      k, vmSymbols::name_name(),      vmSymbols::string_signature());
-    compute_offset(_type_offset,      k, vmSymbols::type_name(),      vmSymbols::object_signature());
-    compute_offset(_flags_offset,     k, vmSymbols::flags_name(),     vmSymbols::int_signature());
-    compute_offset(_vmtarget_offset,  k, vmSymbols::vmtarget_name(),  vmSymbols::object_signature());
-    compute_offset(_vmindex_offset,   k, vmSymbols::vmindex_name(),   vmSymbols::int_signature());
+  klassOop klass_oop = SystemDictionary::MemberName_klass();
+  if (klass_oop != NULL && EnableInvokeDynamic) {
+    compute_offset(_clazz_offset,     klass_oop, vmSymbols::clazz_name(),     vmSymbols::class_signature());
+    compute_offset(_name_offset,      klass_oop, vmSymbols::name_name(),      vmSymbols::string_signature());
+    compute_offset(_type_offset,      klass_oop, vmSymbols::type_name(),      vmSymbols::object_signature());
+    compute_offset(_flags_offset,     klass_oop, vmSymbols::flags_name(),     vmSymbols::int_signature());
+    compute_offset(_vmindex_offset,   klass_oop, vmSymbols::vmindex_name(),   vmSymbols::int_signature());
+    MEMBERNAME_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET);
   }
 }
 
 void java_lang_invoke_DirectMethodHandle::compute_offsets() {
   klassOop k = SystemDictionary::DirectMethodHandle_klass();
   if (k != NULL && EnableInvokeDynamic) {
-    compute_offset(_vmindex_offset,   k, vmSymbols::vmindex_name(),   vmSymbols::int_signature(),    true);
+    DIRECTMETHODHANDLE_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET);
   }
 }
 
@@ -2374,6 +2388,23 @@
   }
 }
 
+void java_lang_invoke_CountingMethodHandle::compute_offsets() {
+  klassOop k = SystemDictionary::CountingMethodHandle_klass();
+  if (k != NULL && EnableInvokeDynamic) {
+    compute_offset(_vmcount_offset, k, vmSymbols::vmcount_name(), vmSymbols::int_signature(), true);
+  }
+}
+
+int java_lang_invoke_CountingMethodHandle::vmcount(oop mh) {
+  assert(is_instance(mh), "CMH only");
+  return mh->int_field(_vmcount_offset);
+}
+
+void java_lang_invoke_CountingMethodHandle::set_vmcount(oop mh, int count) {
+  assert(is_instance(mh), "CMH only");
+  mh->int_field_put(_vmcount_offset, count);
+}
+
 oop java_lang_invoke_MethodHandle::type(oop mh) {
   return mh->obj_field(_type_offset);
 }
@@ -2382,31 +2413,9 @@
   mh->obj_field_put(_type_offset, mtype);
 }
 
-int java_lang_invoke_MethodHandle::vmslots(oop mh) {
-  int vmslots_offset = _vmslots_offset;
-  if (vmslots_offset != 0) {
-#ifdef ASSERT
-    int x = mh->int_field(vmslots_offset);
-    int y = compute_vmslots(mh);
-    assert(x == y, "correct hoisted value");
-#endif
-    return mh->int_field(vmslots_offset);
-  } else {
-    return compute_vmslots(mh);
-  }
-}
-
-// if MH.vmslots exists, hoist into it the value of type.form.vmslots
-void java_lang_invoke_MethodHandle::init_vmslots(oop mh) {
-  int vmslots_offset = _vmslots_offset;
-  if (vmslots_offset != 0) {
-    mh->int_field_put(vmslots_offset, compute_vmslots(mh));
-  }
-}
-
 // fetch type.form.vmslots, which is the number of JVM stack slots
 // required to carry the arguments of this MH
-int java_lang_invoke_MethodHandle::compute_vmslots(oop mh) {
+int java_lang_invoke_MethodHandle::vmslots(oop mh) {
   oop mtype = type(mh);
   if (mtype == NULL)  return 0;  // Java code would get NPE
   oop form = java_lang_invoke_MethodType::form(mtype);
@@ -2626,6 +2635,7 @@
     compute_optional_offset(_erasedType_offset, k, vmSymbols::erasedType_name(), vmSymbols::java_lang_invoke_MethodType_signature(), true);
     compute_optional_offset(_genericInvoker_offset, k, vmSymbols::genericInvoker_name(), vmSymbols::java_lang_invoke_MethodHandle_signature(), true);
     if (_genericInvoker_offset == 0)  _genericInvoker_offset = -1;  // set to explicit "empty" value
+    METHODTYPEFORM_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET);
   }
 }
 
@@ -2677,14 +2687,17 @@
   if (k != NULL) {
     compute_offset(_target_offset, k, vmSymbols::target_name(), vmSymbols::java_lang_invoke_MethodHandle_signature());
   }
-}
-
-oop java_lang_invoke_CallSite::target(oop site) {
-  return site->obj_field(_target_offset);
-}
-
-void java_lang_invoke_CallSite::set_target(oop site, oop target) {
-  site->obj_field_put(_target_offset, target);
+
+  // Disallow compilation of CallSite.setTargetNormal and CallSite.setTargetVolatile
+  // (For C2:  keep this until we have throttling logic for uncommon traps.)
+  if (k != NULL) {
+    instanceKlass* ik = instanceKlass::cast(k);
+    methodOop m_normal   = ik->lookup_method(vmSymbols::setTargetNormal_name(),   vmSymbols::setTarget_signature());
+    methodOop m_volatile = ik->lookup_method(vmSymbols::setTargetVolatile_name(), vmSymbols::setTarget_signature());
+    guarantee(m_normal != NULL && m_volatile != NULL, "must exist");
+    m_normal->set_not_compilable_quietly();
+    m_volatile->set_not_compilable_quietly();
+  }
 }
 
 
@@ -2731,6 +2744,18 @@
 
 
 // Support for java_lang_ClassLoader
+bool java_lang_ClassLoader::offsets_computed = false;
+int  java_lang_ClassLoader::parallelCapable_offset = -1;
+
+void java_lang_ClassLoader::compute_offsets() {
+  assert(!offsets_computed, "offsets should be initialized only once");
+  offsets_computed = true;
+
+  // The field indicating parallelCapable (parallelLockMap) is only present starting in 7.
+  klassOop k1 = SystemDictionary::ClassLoader_klass();
+  compute_optional_offset(parallelCapable_offset,
+    k1, vmSymbols::parallelCapable_name(), vmSymbols::concurrenthashmap_signature());
+}
 
 oop java_lang_ClassLoader::parent(oop loader) {
   assert(loader->is_oop(), "loader must be oop");
@@ -2738,6 +2763,18 @@
 }
 
 
+// For class loader classes, parallelCapable is determined by a
+// non-null parallelLockMap field.  The field is written by
+// java.lang.ClassLoader; the VM only reads it and never sets it.
+bool java_lang_ClassLoader::parallelCapable(oop class_loader) {
+  if (!JDK_Version::is_gte_jdk17x_version()
+     || parallelCapable_offset == -1) {
+     // Default for backward compatibility is false
+     return false;
+  }
+  return (class_loader->obj_field(parallelCapable_offset) != NULL);
+}
+
 bool java_lang_ClassLoader::is_trusted_loader(oop loader) {
   // Fix for 4474172; see evaluation for more details
   loader = non_reflection_class_loader(loader);
@@ -2787,16 +2824,16 @@
 int java_lang_String::offset_offset;
 int java_lang_String::count_offset;
 int java_lang_String::hash_offset;
-int java_lang_Class::klass_offset;
-int java_lang_Class::array_klass_offset;
-int java_lang_Class::resolved_constructor_offset;
-int java_lang_Class::number_of_fake_oop_fields;
-int java_lang_Class::oop_size_offset;
-int java_lang_Class::static_oop_field_count_offset;
+int java_lang_Class::_klass_offset;
+int java_lang_Class::_array_klass_offset;
+int java_lang_Class::_resolved_constructor_offset;
+int java_lang_Class::_oop_size_offset;
+int java_lang_Class::_static_oop_field_count_offset;
 int java_lang_Throwable::backtrace_offset;
 int java_lang_Throwable::detailMessage_offset;
 int java_lang_Throwable::cause_offset;
 int java_lang_Throwable::stackTrace_offset;
+int java_lang_Throwable::static_unassigned_stacktrace_offset;
 int java_lang_reflect_AccessibleObject::override_offset;
 int java_lang_reflect_Method::clazz_offset;
 int java_lang_reflect_Method::name_offset;
@@ -2904,20 +2941,20 @@
 
 
 void java_nio_Buffer::compute_offsets() {
-  klassOop k = SystemDictionary::java_nio_Buffer_klass();
+  klassOop k = SystemDictionary::nio_Buffer_klass();
   assert(k != NULL, "must be loaded in 1.4+");
   compute_offset(_limit_offset, k, vmSymbols::limit_name(), vmSymbols::int_signature());
 }
 
 // Support for intrinsification of sun.misc.AtomicLongCSImpl.attemptUpdate
 int sun_misc_AtomicLongCSImpl::value_offset() {
-  assert(SystemDictionary::sun_misc_AtomicLongCSImpl_klass() != NULL, "can't call this");
+  assert(SystemDictionary::AtomicLongCSImpl_klass() != NULL, "can't call this");
   return _value_offset;
 }
 
 
 void sun_misc_AtomicLongCSImpl::compute_offsets() {
-  klassOop k = SystemDictionary::sun_misc_AtomicLongCSImpl_klass();
+  klassOop k = SystemDictionary::AtomicLongCSImpl_klass();
   // If this class is not present, its value field offset won't be referenced.
   if (k != NULL) {
     compute_offset(_value_offset, k, vmSymbols::value_name(), vmSymbols::long_signature());
@@ -2952,28 +2989,12 @@
   java_lang_String::count_offset  = java_lang_String::offset_offset + sizeof (jint);
   java_lang_String::hash_offset   = java_lang_String::count_offset + sizeof (jint);
 
-  {
-    // Do the Class Class
-    int offset = header;
-    java_lang_Class::oop_size_offset = header;
-    offset += BytesPerInt;
-    java_lang_Class::static_oop_field_count_offset = offset;
-    offset = align_size_up(offset + BytesPerInt, x);
-    java_lang_Class::klass_offset = offset;
-    offset += x;
-    java_lang_Class::array_klass_offset = offset;
-    offset += x;
-    java_lang_Class::resolved_constructor_offset = offset;
-  }
-
-  // This is NOT an offset
-  java_lang_Class::number_of_fake_oop_fields = java_lang_Class::hc_number_of_fake_oop_fields;
-
   // Throwable Class
   java_lang_Throwable::backtrace_offset  = java_lang_Throwable::hc_backtrace_offset  * x + header;
   java_lang_Throwable::detailMessage_offset = java_lang_Throwable::hc_detailMessage_offset * x + header;
   java_lang_Throwable::cause_offset      = java_lang_Throwable::hc_cause_offset      * x + header;
   java_lang_Throwable::stackTrace_offset = java_lang_Throwable::hc_stackTrace_offset * x + header;
+  java_lang_Throwable::static_unassigned_stacktrace_offset = java_lang_Throwable::hc_static_unassigned_stacktrace_offset *  x;
 
   // java_lang_boxing_object
   java_lang_boxing_object::value_offset = java_lang_boxing_object::hc_value_offset + header;
@@ -3019,8 +3040,8 @@
 
 // Compute non-hard-coded field offsets of all the classes in this file
 void JavaClasses::compute_offsets() {
-
-  java_lang_Class::compute_offsets();
+  // java_lang_Class::compute_offsets was called earlier in bootstrap
+  java_lang_ClassLoader::compute_offsets();
   java_lang_Thread::compute_offsets();
   java_lang_ThreadGroup::compute_offsets();
   if (EnableInvokeDynamic) {
@@ -3032,6 +3053,7 @@
     java_lang_invoke_MethodType::compute_offsets();
     java_lang_invoke_MethodTypeForm::compute_offsets();
     java_lang_invoke_CallSite::compute_offsets();
+    java_lang_invoke_CountingMethodHandle::compute_offsets();
   }
   java_security_AccessControlContext::compute_offsets();
   // Initialize reflection classes. The layouts of these classes
@@ -3244,6 +3266,23 @@
 
 #endif // PRODUCT
 
+int InjectedField::compute_offset() {
+  klassOop klass_oop = klass();
+  for (AllFieldStream fs(instanceKlass::cast(klass_oop)); !fs.done(); fs.next()) {
+    if (!may_be_java && !fs.access_flags().is_internal()) {
+      // Only look at injected fields
+      continue;
+    }
+    if (fs.name() == name() && fs.signature() == signature()) {
+      return fs.offset();
+    }
+  }
+  ResourceMark rm;
+  tty->print_cr("Invalid layout of %s at %s", instanceKlass::cast(klass_oop)->external_name(), name()->as_C_string());
+  fatal("Invalid layout of preloaded class");
+  return -1;
+}
+
 void javaClasses_init() {
   JavaClasses::compute_offsets();
   JavaClasses::check_offsets();
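The INJECTED_FIELD_COMPUTE_OFFSET macro applied in the compute_offsets functions above is defined elsewhere in javaClasses.cpp and is not part of these hunks. As a hedged sketch of its shape (the exact definition may differ), it pairs each *_INJECTED_FIELDS entry with the corresponding enum constant and offset field declared in the header changes below:

    // Sketch only -- the real macro lives in javaClasses.cpp and may differ in detail.
    #define INJECTED_FIELD_COMPUTE_OFFSET(klass, name, signature, may_be_java)      \
      klass::_##name##_offset =                                                      \
        JavaClasses::compute_injected_offset(JavaClasses::klass##_##name##_enum);

    // So CLASS_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET) expands to assignments like:
    //   java_lang_Class::_klass_offset =
    //       JavaClasses::compute_injected_offset(JavaClasses::java_lang_Class_klass_enum);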
--- a/src/share/vm/classfile/javaClasses.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/classfile/javaClasses.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -156,30 +156,32 @@
 
 // Interface to java.lang.Class objects
 
+#define CLASS_INJECTED_FIELDS(macro)                                       \
+  macro(java_lang_Class, klass,                  object_signature,  false) \
+  macro(java_lang_Class, resolved_constructor,   object_signature,  false) \
+  macro(java_lang_Class, array_klass,            object_signature,  false) \
+  macro(java_lang_Class, oop_size,               int_signature,     false) \
+  macro(java_lang_Class, static_oop_field_count, int_signature,     false)
+
 class java_lang_Class : AllStatic {
-   friend class VMStructs;
+  friend class VMStructs;
+
  private:
   // The fake offsets are added by the class loader when java.lang.Class is loaded
 
-  enum {
-    hc_number_of_fake_oop_fields   = 3,
-    hc_number_of_fake_int_fields   = 2
-  };
+  static int _klass_offset;
+  static int _resolved_constructor_offset;
+  static int _array_klass_offset;
 
-  static int klass_offset;
-  static int resolved_constructor_offset;
-  static int array_klass_offset;
-  static int number_of_fake_oop_fields;
+  static int _oop_size_offset;
+  static int _static_oop_field_count_offset;
 
-  static int oop_size_offset;
-  static int static_oop_field_count_offset;
-
-  static void compute_offsets();
   static bool offsets_computed;
   static int classRedefinedCount_offset;
-  static int parallelCapable_offset;
 
  public:
+  static void compute_offsets();
+
   // Instance creation
   static oop  create_mirror(KlassHandle k, TRAPS);
   static void fixup_mirror(KlassHandle k, TRAPS);
@@ -209,14 +211,12 @@
   static klassOop array_klass(oop java_class);
   static void set_array_klass(oop java_class, klassOop klass);
   // compiler support for class operations
-  static int klass_offset_in_bytes() { return klass_offset; }
-  static int resolved_constructor_offset_in_bytes() { return resolved_constructor_offset; }
-  static int array_klass_offset_in_bytes() { return array_klass_offset; }
+  static int klass_offset_in_bytes()                { return _klass_offset; }
+  static int resolved_constructor_offset_in_bytes() { return _resolved_constructor_offset; }
+  static int array_klass_offset_in_bytes()          { return _array_klass_offset; }
   // Support for classRedefinedCount field
   static int classRedefinedCount(oop the_class_mirror);
   static void set_classRedefinedCount(oop the_class_mirror, int value);
-  // Support for parallelCapable field
-  static bool parallelCapable(oop the_class_mirror);
 
   static int oop_size(oop java_class);
   static void set_oop_size(oop java_class, int size);
@@ -393,6 +393,9 @@
     hc_cause_offset         =  2,  // New since 1.4
     hc_stackTrace_offset    =  3   // New since 1.4
   };
+  enum {
+      hc_static_unassigned_stacktrace_offset = 0  // New since 1.7
+  };
   // Trace constants
   enum {
     trace_methods_offset = 0,
@@ -406,6 +409,7 @@
   static int detailMessage_offset;
   static int cause_offset;
   static int stackTrace_offset;
+  static int static_unassigned_stacktrace_offset;
 
   // Printing
   static char* print_stack_element_to_buffer(methodOop method, int bci);
@@ -414,6 +418,9 @@
   static void clear_stacktrace(oop throwable);
   // No stack trace available
   static const char* no_stack_trace_message();
+  // Stacktrace (post JDK 1.7.0 to allow immutability protocol to be followed)
+  static void set_stacktrace(oop throwable, oop st_element_array);
+  static oop unassigned_stacktrace();
 
  public:
   // Backtrace
@@ -438,7 +445,6 @@
   static void allocate_backtrace(Handle throwable, TRAPS);
   // Fill in current stack trace for throwable with preallocated backtrace (no GC)
   static void fill_in_stack_trace_of_preallocated_backtrace(Handle throwable);
-
   // Fill in current stack trace, can cause GC
   static void fill_in_stack_trace(Handle throwable, methodHandle method, TRAPS);
   static void fill_in_stack_trace(Handle throwable, methodHandle method = methodHandle());
@@ -765,7 +771,7 @@
     ref->obj_field_put(referent_offset, value);
   }
   static void set_referent_raw(oop ref, oop value) {
-    ref->obj_field_raw_put(referent_offset, value);
+    ref->obj_field_put_raw(referent_offset, value);
   }
   static HeapWord* referent_addr(oop ref) {
     return ref->obj_field_addr<HeapWord>(referent_offset);
@@ -777,7 +783,7 @@
     ref->obj_field_put(next_offset, value);
   }
   static void set_next_raw(oop ref, oop value) {
-    ref->obj_field_raw_put(next_offset, value);
+    ref->obj_field_put_raw(next_offset, value);
   }
   static HeapWord* next_addr(oop ref) {
     return ref->obj_field_addr<HeapWord>(next_offset);
@@ -789,7 +795,7 @@
     ref->obj_field_put(discovered_offset, value);
   }
   static void set_discovered_raw(oop ref, oop value) {
-    ref->obj_field_raw_put(discovered_offset, value);
+    ref->obj_field_put_raw(discovered_offset, value);
   }
   static HeapWord* discovered_addr(oop ref) {
     return ref->obj_field_addr<HeapWord>(discovered_offset);
@@ -828,16 +834,19 @@
 
 // Interface to java.lang.invoke.MethodHandle objects
 
+#define METHODHANDLE_INJECTED_FIELDS(macro)                               \
+  macro(java_lang_invoke_MethodHandle, vmentry,  intptr_signature, false) \
+  macro(java_lang_invoke_MethodHandle, vmtarget, object_signature, true)
+
 class MethodHandleEntry;
 
 class java_lang_invoke_MethodHandle: AllStatic {
   friend class JavaClasses;
 
  private:
-  static int _vmentry_offset;           // assembly code trampoline for MH
-  static int _vmtarget_offset;          // class-specific target reference
+  static int _vmentry_offset;            // assembly code trampoline for MH
+  static int _vmtarget_offset;           // class-specific target reference
   static int _type_offset;              // the MethodType of this MH
-  static int _vmslots_offset;           // OPTIONAL hoisted type.form.vmslots
 
   static void compute_offsets();
 
@@ -853,8 +862,6 @@
   static void       set_vmentry(oop mh, MethodHandleEntry* data);
 
   static int            vmslots(oop mh);
-  static void      init_vmslots(oop mh);
-  static int    compute_vmslots(oop mh);
 
   // Testers
   static bool is_subclass(klassOop klass) {
@@ -868,14 +875,15 @@
   static int type_offset_in_bytes()             { return _type_offset; }
   static int vmtarget_offset_in_bytes()         { return _vmtarget_offset; }
   static int vmentry_offset_in_bytes()          { return _vmentry_offset; }
-  static int vmslots_offset_in_bytes()          { return _vmslots_offset; }
 };
 
+#define DIRECTMETHODHANDLE_INJECTED_FIELDS(macro)                          \
+  macro(java_lang_invoke_DirectMethodHandle, vmindex, int_signature, true)
+
 class java_lang_invoke_DirectMethodHandle: public java_lang_invoke_MethodHandle {
   friend class JavaClasses;
 
  private:
-  //         _vmtarget_offset;          // method   or class      or interface
   static int _vmindex_offset;           // negative or vtable idx or itable idx
   static void compute_offsets();
 
@@ -975,9 +983,40 @@
 };
 
 
+// A simple class that maintains an invocation count
+class java_lang_invoke_CountingMethodHandle: public java_lang_invoke_MethodHandle {
+  friend class JavaClasses;
+
+ private:
+  static int _vmcount_offset;
+  static void compute_offsets();
+
+ public:
+  // Accessors
+  static int            vmcount(oop mh);
+  static void       set_vmcount(oop mh, int count);
+
+  // Testers
+  static bool is_subclass(klassOop klass) {
+    return SystemDictionary::CountingMethodHandle_klass() != NULL &&
+      Klass::cast(klass)->is_subclass_of(SystemDictionary::CountingMethodHandle_klass());
+  }
+  static bool is_instance(oop obj) {
+    return obj != NULL && is_subclass(obj->klass());
+  }
+
+  // Accessors for code generation:
+  static int vmcount_offset_in_bytes()          { return _vmcount_offset; }
+};
+
+
+
 // Interface to java.lang.invoke.MemberName objects
 // (These are a private interface for Java code to query the class hierarchy.)
 
+#define MEMBERNAME_INJECTED_FIELDS(macro)                              \
+  macro(java_lang_invoke_MemberName, vmtarget, object_signature, true)
+
 class java_lang_invoke_MemberName: AllStatic {
   friend class JavaClasses;
 
@@ -1087,6 +1126,10 @@
   static int form_offset_in_bytes()             { return _form_offset; }
 };
 
+#define METHODTYPEFORM_INJECTED_FIELDS(macro)                              \
+  macro(java_lang_invoke_MethodTypeForm, vmslots,  int_signature,    true) \
+  macro(java_lang_invoke_MethodTypeForm, vmlayout, object_signature, true)
+
 class java_lang_invoke_MethodTypeForm: AllStatic {
   friend class JavaClasses;
 
@@ -1101,6 +1144,8 @@
  public:
   // Accessors
   static int            vmslots(oop mtform);
+  static void       set_vmslots(oop mtform, int vmslots);
+
   static oop            erasedType(oop mtform);
   static oop            genericInvoker(oop mtform);
 
@@ -1122,21 +1167,16 @@
 
 private:
   static int _target_offset;
-  static int _caller_method_offset;
-  static int _caller_bci_offset;
 
   static void compute_offsets();
 
 public:
   // Accessors
-  static oop            target(oop site);
-  static void       set_target(oop site, oop target);
+  static oop              target(         oop site)             { return site->obj_field(             _target_offset);         }
+  static void         set_target(         oop site, oop target) {        site->obj_field_put(         _target_offset, target); }
 
-  static oop            caller_method(oop site);
-  static void       set_caller_method(oop site, oop ref);
-
-  static jint           caller_bci(oop site);
-  static void       set_caller_bci(oop site, jint bci);
+  static volatile oop     target_volatile(oop site)             { return site->obj_field_volatile(    _target_offset);         }
+  static void         set_target_volatile(oop site, oop target) {        site->obj_field_put_volatile(_target_offset, target); }
 
   // Testers
   static bool is_subclass(klassOop klass) {
@@ -1148,8 +1188,6 @@
 
   // Accessors for code generation:
   static int target_offset_in_bytes()           { return _target_offset; }
-  static int caller_method_offset_in_bytes()    { return _caller_method_offset; }
-  static int caller_bci_offset_in_bytes()       { return _caller_bci_offset; }
 };
 
 
@@ -1180,11 +1218,18 @@
    hc_parent_offset = 0
   };
 
+  static bool offsets_computed;
   static int parent_offset;
+  static int parallelCapable_offset;
+
+  static void compute_offsets();
 
  public:
   static oop parent(oop loader);
 
+  // Support for parallelCapable field
+  static bool parallelCapable(oop the_class_mirror);
+
   static bool is_trusted_loader(oop loader);
 
   // Fix for 4474172
@@ -1306,17 +1351,71 @@
   static oop  get_owner_threadObj(oop obj);
 };
 
+// Used to declare fields that need to be injected into Java classes
+// for the JVM to use.  The name_index and signature_index are
+// declared in vmSymbols.  The may_be_java flag marks fields that
+// might already exist in Java and should only be injected if they
+// don't.  Otherwise the field is unconditionally injected and the
+// JVM uses the injected one, which ensures that name collisions
+// don't occur.  In general may_be_java should be false unless
+// there's a good reason.
+
+class InjectedField {
+ public:
+  const SystemDictionary::WKID klass_id;
+  const vmSymbols::SID name_index;
+  const vmSymbols::SID signature_index;
+  const bool           may_be_java;
+
+
+  klassOop klass() const    { return SystemDictionary::well_known_klass(klass_id); }
+  Symbol* name() const      { return lookup_symbol(name_index); }
+  Symbol* signature() const { return lookup_symbol(signature_index); }
+
+  int compute_offset();
+
+  // Find the Symbol for this index
+  static Symbol* lookup_symbol(int symbol_index) {
+    return vmSymbols::symbol_at((vmSymbols::SID)symbol_index);
+  }
+};
+
+#define DECLARE_INJECTED_FIELD_ENUM(klass, name, signature, may_be_java) \
+  klass##_##name##_enum,
+
+#define ALL_INJECTED_FIELDS(macro)          \
+  CLASS_INJECTED_FIELDS(macro)              \
+  METHODHANDLE_INJECTED_FIELDS(macro)       \
+  DIRECTMETHODHANDLE_INJECTED_FIELDS(macro) \
+  MEMBERNAME_INJECTED_FIELDS(macro)         \
+  METHODTYPEFORM_INJECTED_FIELDS(macro)
+
 // Interface to hard-coded offset checking
 
 class JavaClasses : AllStatic {
  private:
+
+  static InjectedField _injected_fields[];
+
   static bool check_offset(const char *klass_name, int offset, const char *field_name, const char* field_sig) PRODUCT_RETURN0;
   static bool check_static_offset(const char *klass_name, int hardcoded_offset, const char *field_name, const char* field_sig) PRODUCT_RETURN0;
   static bool check_constant(const char *klass_name, int constant, const char *field_name, const char* field_sig) PRODUCT_RETURN0;
+
  public:
+  enum InjectedFieldID {
+    ALL_INJECTED_FIELDS(DECLARE_INJECTED_FIELD_ENUM)
+    MAX_enum
+  };
+
+  static int compute_injected_offset(InjectedFieldID id);
+
   static void compute_hard_coded_offsets();
   static void compute_offsets();
   static void check_offsets() PRODUCT_RETURN;
+
+  static InjectedField* get_injected(Symbol* class_name, int* field_count);
 };
 
+#undef DECLARE_INJECTED_FIELD_ENUM
+
 #endif // SHARE_VM_CLASSFILE_JAVACLASSES_HPP
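For readers tracing the macro machinery: applying DECLARE_INJECTED_FIELD_ENUM through ALL_INJECTED_FIELDS produces one enum constant per injected field. A self-contained sketch of what the java.lang.Class portion of JavaClasses::InjectedFieldID expands to (the real enum also carries the MethodHandle, DirectMethodHandle, MemberName and MethodTypeForm entries, followed by MAX_enum):

    // Sketch of the expansion for the CLASS_INJECTED_FIELDS entries only.
    enum InjectedFieldID_sketch {
      java_lang_Class_klass_enum,
      java_lang_Class_resolved_constructor_enum,
      java_lang_Class_array_klass_enum,
      java_lang_Class_oop_size_enum,
      java_lang_Class_static_oop_field_count_enum
      // ... METHODHANDLE_, DIRECTMETHODHANDLE_, MEMBERNAME_ and
      // METHODTYPEFORM_INJECTED_FIELDS entries follow, then MAX_enum.
    };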
--- a/src/share/vm/classfile/systemDictionary.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/classfile/systemDictionary.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -125,13 +125,13 @@
 bool SystemDictionary::is_parallelCapable(Handle class_loader) {
   if (UnsyncloadClass || class_loader.is_null()) return true;
   if (AlwaysLockClassLoader) return false;
-  return java_lang_Class::parallelCapable(class_loader());
+  return java_lang_ClassLoader::parallelCapable(class_loader());
 }
 // ----------------------------------------------------------------------------
 // ParallelDefineClass flag does not apply to bootclass loader
 bool SystemDictionary::is_parallelDefine(Handle class_loader) {
    if (class_loader.is_null()) return false;
-   if (AllowParallelDefineClass && java_lang_Class::parallelCapable(class_loader())) {
+   if (AllowParallelDefineClass && java_lang_ClassLoader::parallelCapable(class_loader())) {
      return true;
    }
    return false;
@@ -1290,7 +1290,7 @@
                                                     Symbol* class_name,
                                                     TRAPS) {
 
-  klassOop dlm = SystemDictionary::sun_jkernel_DownloadManager_klass();
+  klassOop dlm = SystemDictionary::DownloadManager_klass();
   instanceKlassHandle nk;
 
   // If download manager class isn't loaded just return.
@@ -1953,7 +1953,7 @@
   // first do Object, String, Class
   initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(Class_klass), scan, CHECK);
 
-  debug_only(instanceKlass::verify_class_klass_nonstatic_oop_maps(WK_KLASS(Class_klass)));
+  java_lang_Class::compute_offsets();
 
   // Fixup mirrors for classes loaded before java.lang.Class.
   // These calls iterate over the objects currently in the perm gen
@@ -1978,7 +1978,7 @@
 
   // JSR 292 classes
   WKID jsr292_group_start = WK_KLASS_ENUM_NAME(MethodHandle_klass);
-  WKID jsr292_group_end   = WK_KLASS_ENUM_NAME(CallSite_klass);
+  WKID jsr292_group_end   = WK_KLASS_ENUM_NAME(VolatileCallSite_klass);
   initialize_wk_klasses_until(jsr292_group_start, scan, CHECK);
   if (EnableInvokeDynamic) {
     initialize_wk_klasses_through(jsr292_group_end, scan, CHECK);
@@ -2001,7 +2001,7 @@
   //_box_klasses[T_ARRAY]   = WK_KLASS(object_klass);
 
 #ifdef KERNEL
-  if (sun_jkernel_DownloadManager_klass() == NULL) {
+  if (DownloadManager_klass() == NULL) {
     warning("Cannot find sun/jkernel/DownloadManager");
   }
 #endif // KERNEL
@@ -2735,7 +2735,7 @@
       class_size += ik->local_interfaces()->size();
       class_size += ik->transitive_interfaces()->size();
       // We do not have to count implementors, since we only store one!
-      class_size += ik->fields()->size();
+      class_size += ik->all_fields_count() * FieldInfo::field_slots;
     }
   }
 
--- a/src/share/vm/classfile/systemDictionary.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/classfile/systemDictionary.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -133,29 +133,33 @@
   template(reflect_Method_klass,         java_lang_reflect_Method,       Pre) \
   template(reflect_Constructor_klass,    java_lang_reflect_Constructor,  Pre) \
                                                                               \
-  /* NOTE: needed too early in bootstrapping process to have checks based on JDK version */ \
-  /* Universe::is_gte_jdk14x_version() is not set up by this point. */        \
-  /* It's okay if this turns out to be NULL in non-1.4 JDKs. */               \
-  template(reflect_MagicAccessorImpl_klass,          sun_reflect_MagicAccessorImpl,  Opt) \
-  template(reflect_MethodAccessorImpl_klass, sun_reflect_MethodAccessorImpl, Opt_Only_JDK14NewRef) \
-  template(reflect_ConstructorAccessorImpl_klass, sun_reflect_ConstructorAccessorImpl, Opt_Only_JDK14NewRef) \
-  template(reflect_DelegatingClassLoader_klass, sun_reflect_DelegatingClassLoader, Opt) \
-  template(reflect_ConstantPool_klass,  sun_reflect_ConstantPool,       Opt_Only_JDK15) \
-  template(reflect_UnsafeStaticFieldAccessorImpl_klass, sun_reflect_UnsafeStaticFieldAccessorImpl, Opt_Only_JDK15) \
+  /* NOTE: needed too early in bootstrapping process to have checks based on JDK version */                              \
+  /* Universe::is_gte_jdk14x_version() is not set up by this point. */                                                   \
+  /* It's okay if this turns out to be NULL in non-1.4 JDKs. */                                                          \
+  template(reflect_MagicAccessorImpl_klass,             sun_reflect_MagicAccessorImpl,             Opt)                  \
+  template(reflect_MethodAccessorImpl_klass,            sun_reflect_MethodAccessorImpl,            Opt_Only_JDK14NewRef) \
+  template(reflect_ConstructorAccessorImpl_klass,       sun_reflect_ConstructorAccessorImpl,       Opt_Only_JDK14NewRef) \
+  template(reflect_DelegatingClassLoader_klass,         sun_reflect_DelegatingClassLoader,         Opt)                  \
+  template(reflect_ConstantPool_klass,                  sun_reflect_ConstantPool,                  Opt_Only_JDK15)       \
+  template(reflect_UnsafeStaticFieldAccessorImpl_klass, sun_reflect_UnsafeStaticFieldAccessorImpl, Opt_Only_JDK15)       \
                                                                               \
   /* support for dynamic typing; it's OK if these are NULL in earlier JDKs */ \
-  template(MethodHandle_klass,           java_lang_invoke_MethodHandle,     Pre_JSR292) \
-  template(MemberName_klass,             java_lang_invoke_MemberName,       Pre_JSR292) \
-  template(MethodHandleNatives_klass,    java_lang_invoke_MethodHandleNatives, Pre_JSR292) \
-  template(AdapterMethodHandle_klass,    java_lang_invoke_AdapterMethodHandle, Pre_JSR292) \
-  template(BoundMethodHandle_klass,      java_lang_invoke_BoundMethodHandle, Pre_JSR292) \
-  template(DirectMethodHandle_klass,     java_lang_invoke_DirectMethodHandle, Pre_JSR292) \
-  template(MethodType_klass,             java_lang_invoke_MethodType,       Pre_JSR292) \
-  template(MethodTypeForm_klass,         java_lang_invoke_MethodTypeForm,   Pre_JSR292) \
-  template(BootstrapMethodError_klass,   java_lang_BootstrapMethodError, Pre_JSR292) \
+  template(MethodHandle_klass,             java_lang_invoke_MethodHandle,             Pre_JSR292) \
+  template(MemberName_klass,               java_lang_invoke_MemberName,               Pre_JSR292) \
+  template(MethodHandleNatives_klass,      java_lang_invoke_MethodHandleNatives,      Pre_JSR292) \
+  template(AdapterMethodHandle_klass,      java_lang_invoke_AdapterMethodHandle,      Pre_JSR292) \
+  template(BoundMethodHandle_klass,        java_lang_invoke_BoundMethodHandle,        Pre_JSR292) \
+  template(DirectMethodHandle_klass,       java_lang_invoke_DirectMethodHandle,       Pre_JSR292) \
+  template(MethodType_klass,               java_lang_invoke_MethodType,               Pre_JSR292) \
+  template(MethodTypeForm_klass,           java_lang_invoke_MethodTypeForm,           Pre_JSR292) \
+  template(BootstrapMethodError_klass,     java_lang_BootstrapMethodError,            Pre_JSR292) \
   template(WrongMethodTypeException_klass, java_lang_invoke_WrongMethodTypeException, Pre_JSR292) \
-  template(CallSite_klass,               java_lang_invoke_CallSite,         Pre_JSR292) \
-  /* Note: MethodHandle must be first, and CallSite last in group */          \
+  template(CallSite_klass,                 java_lang_invoke_CallSite,                 Pre_JSR292) \
+  template(CountingMethodHandle_klass,     java_lang_invoke_CountingMethodHandle,     Opt)        \
+  template(ConstantCallSite_klass,         java_lang_invoke_ConstantCallSite,         Pre_JSR292) \
+  template(MutableCallSite_klass,          java_lang_invoke_MutableCallSite,          Pre_JSR292) \
+  template(VolatileCallSite_klass,         java_lang_invoke_VolatileCallSite,         Pre_JSR292) \
+  /* Note: MethodHandle must be first, and VolatileCallSite last in group */  \
                                                                               \
   template(StringBuffer_klass,           java_lang_StringBuffer,         Pre) \
   template(StringBuilder_klass,          java_lang_StringBuilder,        Pre) \
@@ -164,14 +168,14 @@
   template(StackTraceElement_klass,      java_lang_StackTraceElement,    Opt) \
   /* Universe::is_gte_jdk14x_version() is not set up by this point. */        \
   /* It's okay if this turns out to be NULL in non-1.4 JDKs. */               \
-  template(java_nio_Buffer_klass,        java_nio_Buffer,                Opt) \
+  template(nio_Buffer_klass,             java_nio_Buffer,                Opt) \
                                                                               \
   /* If this class isn't present, it won't be referenced. */                  \
-  template(sun_misc_AtomicLongCSImpl_klass, sun_misc_AtomicLongCSImpl,   Opt) \
+  template(AtomicLongCSImpl_klass,       sun_misc_AtomicLongCSImpl,   Opt)    \
                                                                               \
-  template(sun_jkernel_DownloadManager_klass, sun_jkernel_DownloadManager, Opt_Kernel) \
+  template(DownloadManager_klass,        sun_jkernel_DownloadManager, Opt_Kernel) \
                                                                               \
-  template(sun_misc_PostVMInitHook_klass, sun_misc_PostVMInitHook, Opt)       \
+  template(PostVMInitHook_klass,         sun_misc_PostVMInitHook, Opt)        \
                                                                               \
   /* Preload boxing klasses */                                                \
   template(Boolean_klass,                java_lang_Boolean,              Pre) \
@@ -195,7 +199,7 @@
   enum WKID {
     NO_WKID = 0,
 
-    #define WK_KLASS_ENUM(name, ignore_s, ignore_o) WK_KLASS_ENUM_NAME(name),
+    #define WK_KLASS_ENUM(name, symbol, ignore_o) WK_KLASS_ENUM_NAME(name), WK_KLASS_ENUM_NAME(symbol) = WK_KLASS_ENUM_NAME(name),
     WK_KLASSES_DO(WK_KLASS_ENUM)
     #undef WK_KLASS_ENUM
 
@@ -422,11 +426,16 @@
   }
 
 public:
-  #define WK_KLASS_DECLARE(name, ignore_symbol, option) \
+  #define WK_KLASS_DECLARE(name, symbol, option) \
     static klassOop name() { return check_klass_##option(_well_known_klasses[WK_KLASS_ENUM_NAME(name)]); }
   WK_KLASSES_DO(WK_KLASS_DECLARE);
   #undef WK_KLASS_DECLARE
 
+  static klassOop well_known_klass(WKID id) {
+    assert(id >= (int)FIRST_WKID && id < (int)WKID_LIMIT, "oob");
+    return _well_known_klasses[id];
+  }
+
   // Local definition for direct access to the private array:
   #define WK_KLASS(name) _well_known_klasses[SystemDictionary::WK_KLASS_ENUM_NAME(name)]
 
--- a/src/share/vm/classfile/vmSymbols.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/classfile/vmSymbols.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -218,6 +218,7 @@
   template(returnType_name,                           "returnType")                               \
   template(signature_name,                            "signature")                                \
   template(slot_name,                                 "slot")                                     \
+  template(selectAlternative_name,                    "selectAlternative")                        \
                                                                                                   \
   /* Support for annotations (JDK 1.5 and above) */                                               \
                                                                                                   \
@@ -233,6 +234,9 @@
   template(java_lang_invoke_InvokeDynamic,            "java/lang/invoke/InvokeDynamic")           \
   template(java_lang_invoke_Linkage,                  "java/lang/invoke/Linkage")                 \
   template(java_lang_invoke_CallSite,                 "java/lang/invoke/CallSite")                \
+  template(java_lang_invoke_ConstantCallSite,         "java/lang/invoke/ConstantCallSite")        \
+  template(java_lang_invoke_MutableCallSite,          "java/lang/invoke/MutableCallSite")         \
+  template(java_lang_invoke_VolatileCallSite,         "java/lang/invoke/VolatileCallSite")        \
   template(java_lang_invoke_MethodHandle,             "java/lang/invoke/MethodHandle")            \
   template(java_lang_invoke_MethodType,               "java/lang/invoke/MethodType")              \
   template(java_lang_invoke_WrongMethodTypeException, "java/lang/invoke/WrongMethodTypeException") \
@@ -243,9 +247,11 @@
   template(java_lang_invoke_MethodTypeForm_signature, "Ljava/lang/invoke/MethodTypeForm;")        \
   template(java_lang_invoke_MemberName,               "java/lang/invoke/MemberName")              \
   template(java_lang_invoke_MethodHandleNatives,      "java/lang/invoke/MethodHandleNatives")     \
+  template(java_lang_invoke_MethodHandleImpl,         "java/lang/invoke/MethodHandleImpl")        \
   template(java_lang_invoke_AdapterMethodHandle,      "java/lang/invoke/AdapterMethodHandle")     \
   template(java_lang_invoke_BoundMethodHandle,        "java/lang/invoke/BoundMethodHandle")       \
   template(java_lang_invoke_DirectMethodHandle,       "java/lang/invoke/DirectMethodHandle")      \
+  template(java_lang_invoke_CountingMethodHandle,     "java/lang/invoke/CountingMethodHandle")    \
   /* internal up-calls made only by the JVM, via class sun.invoke.MethodHandleNatives: */         \
   template(findMethodHandleType_name,                 "findMethodHandleType")                     \
   template(findMethodHandleType_signature,       "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/lang/invoke/MethodType;") \
@@ -255,8 +261,12 @@
   template(linkMethodHandleConstant_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;)Ljava/lang/invoke/MethodHandle;") \
   template(makeDynamicCallSite_name,                  "makeDynamicCallSite")                      \
   template(makeDynamicCallSite_signature, "(Ljava/lang/invoke/MethodHandle;Ljava/lang/String;Ljava/lang/invoke/MethodType;Ljava/lang/Object;Ljava/lang/invoke/MemberName;I)Ljava/lang/invoke/CallSite;") \
-  NOT_LP64(  do_alias(machine_word_signature,         int_signature)  )                           \
-  LP64_ONLY( do_alias(machine_word_signature,         long_signature) )                           \
+  template(setTargetNormal_name,                      "setTargetNormal")                          \
+  template(setTargetVolatile_name,                    "setTargetVolatile")                        \
+  template(setTarget_signature,                       "(Ljava/lang/invoke/MethodHandle;)V")       \
+  NOT_LP64(  do_alias(intptr_signature,               int_signature)  )                           \
+  LP64_ONLY( do_alias(intptr_signature,               long_signature) )                           \
+  template(selectAlternative_signature, "(ZLjava/lang/invoke/MethodHandle;Ljava/lang/invoke/MethodHandle;)Ljava/lang/invoke/MethodHandle;") \
                                                                                                   \
   /* common method and field names */                                                             \
   template(object_initializer_name,                   "<init>")                                   \
@@ -341,6 +351,7 @@
   template(vmmethod_name,                             "vmmethod")                                 \
   template(vmtarget_name,                             "vmtarget")                                 \
   template(vmentry_name,                              "vmentry")                                  \
+  template(vmcount_name,                              "vmcount")                                  \
   template(vmslots_name,                              "vmslots")                                  \
   template(vmlayout_name,                             "vmlayout")                                 \
   template(vmindex_name,                              "vmindex")                                  \
@@ -354,6 +365,11 @@
   template(erasedType_name,                           "erasedType")                               \
   template(genericInvoker_name,                       "genericInvoker")                           \
   template(append_name,                               "append")                                   \
+  template(klass_name,                                "klass")                                    \
+  template(resolved_constructor_name,                 "resolved_constructor")                     \
+  template(array_klass_name,                          "array_klass")                              \
+  template(oop_size_name,                             "oop_size")                                 \
+  template(static_oop_field_count_name,               "static_oop_field_count")                   \
                                                                                                   \
   /* non-intrinsic name/signature pairs: */                                                       \
   template(register_method_name,                      "register")                                 \
@@ -904,6 +920,8 @@
   do_intrinsic(_invokeVarargs,            java_lang_invoke_MethodHandle, invokeVarargs_name, object_array_object_signature, F_R)  \
   do_intrinsic(_invokeDynamic,            java_lang_invoke_InvokeDynamic, star_name,         object_array_object_signature, F_SN) \
                                                                                                                         \
+  do_intrinsic(_selectAlternative,        java_lang_invoke_MethodHandleImpl, selectAlternative_name, selectAlternative_signature, F_S)  \
+                                                                                                                        \
   /* unboxing methods: */                                                                                               \
   do_intrinsic(_booleanValue,             java_lang_Boolean,      booleanValue_name, void_boolean_signature, F_R)       \
    do_name(     booleanValue_name,       "booleanValue")                                                                \
@@ -949,7 +967,8 @@
 // Class vmSymbols
 
 class vmSymbols: AllStatic {
- friend class vmIntrinsics;
+  friend class vmIntrinsics;
+  friend class VMStructs;
  public:
   // enum for figuring positions and size of array holding Symbol*s
   enum SID {
--- a/src/share/vm/code/dependencies.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/code/dependencies.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -113,6 +113,11 @@
   assert_common_1(no_finalizable_subclasses, ctxk);
 }
 
+void Dependencies::assert_call_site_target_value(ciCallSite* call_site, ciMethodHandle* method_handle) {
+  check_ctxk(call_site->klass());
+  assert_common_2(call_site_target_value, call_site, method_handle);
+}
+
 // Helper function.  If we are adding a new dep. under ctxk2,
 // try to find an old dep. under a broader* ctxk1.  If there is
 //
@@ -130,7 +135,7 @@
   }
 }
 
-void Dependencies::assert_common_1(Dependencies::DepType dept, ciObject* x) {
+void Dependencies::assert_common_1(DepType dept, ciObject* x) {
   assert(dep_args(dept) == 1, "sanity");
   log_dependency(dept, x);
   GrowableArray<ciObject*>* deps = _deps[dept];
@@ -143,21 +148,37 @@
   }
 }
 
-void Dependencies::assert_common_2(Dependencies::DepType dept,
-                                   ciKlass* ctxk, ciObject* x) {
-  assert(dep_context_arg(dept) == 0, "sanity");
+void Dependencies::assert_common_2(DepType dept,
+                                   ciObject* x0, ciObject* x1) {
   assert(dep_args(dept) == 2, "sanity");
-  log_dependency(dept, ctxk, x);
+  log_dependency(dept, x0, x1);
   GrowableArray<ciObject*>* deps = _deps[dept];
 
   // see if the same (or a similar) dep is already recorded
-  if (note_dep_seen(dept, x)) {
-    // look in this bucket for redundant assertions
-    const int stride = 2;
-    for (int i = deps->length(); (i -= stride) >= 0; ) {
-      ciObject* x1 = deps->at(i+1);
-      if (x == x1) {  // same subject; check the context
-        if (maybe_merge_ctxk(deps, i+0, ctxk)) {
+  bool has_ctxk = has_explicit_context_arg(dept);
+  if (has_ctxk) {
+    assert(dep_context_arg(dept) == 0, "sanity");
+    if (note_dep_seen(dept, x1)) {
+      // look in this bucket for redundant assertions
+      const int stride = 2;
+      for (int i = deps->length(); (i -= stride) >= 0; ) {
+        ciObject* y1 = deps->at(i+1);
+        if (x1 == y1) {  // same subject; check the context
+          if (maybe_merge_ctxk(deps, i+0, x0->as_klass())) {
+            return;
+          }
+        }
+      }
+    }
+  } else {
+    assert(dep_implicit_context_arg(dept) == 0, "sanity");
+    if (note_dep_seen(dept, x0) && note_dep_seen(dept, x1)) {
+      // look in this bucket for redundant assertions
+      const int stride = 2;
+      for (int i = deps->length(); (i -= stride) >= 0; ) {
+        ciObject* y0 = deps->at(i+0);
+        ciObject* y1 = deps->at(i+1);
+        if (x0 == y0 && x1 == y1) {
           return;
         }
       }
@@ -165,11 +186,11 @@
   }
 
   // append the assertion in the correct bucket:
-  deps->append(ctxk);
-  deps->append(x);
+  deps->append(x0);
+  deps->append(x1);
 }
 
-void Dependencies::assert_common_3(Dependencies::DepType dept,
+void Dependencies::assert_common_3(DepType dept,
                                    ciKlass* ctxk, ciObject* x, ciObject* x2) {
   assert(dep_context_arg(dept) == 0, "sanity");
   assert(dep_args(dept) == 3, "sanity");
@@ -341,7 +362,8 @@
   "unique_concrete_method",
   "abstract_with_exclusive_concrete_subtypes_2",
   "exclusive_concrete_methods_2",
-  "no_finalizable_subclasses"
+  "no_finalizable_subclasses",
+  "call_site_target_value"
 };
 
 int Dependencies::_dep_args[TYPE_LIMIT] = {
@@ -354,7 +376,8 @@
   2, // unique_concrete_method ctxk, m
   3, // unique_concrete_subtypes_2 ctxk, k1, k2
   3, // unique_concrete_methods_2 ctxk, m1, m2
-  1  // no_finalizable_subclasses ctxk
+  1, // no_finalizable_subclasses ctxk
+  2  // call_site_target_value call_site, method_handle
 };
 
 const char* Dependencies::dep_name(Dependencies::DepType dept) {
@@ -367,6 +390,10 @@
   return _dep_args[dept];
 }
 
+void Dependencies::check_valid_dependency_type(DepType dept) {
+  guarantee(FIRST_TYPE <= dept && dept < TYPE_LIMIT, err_msg("invalid dependency type: %d", (int) dept));
+}
+
 // for the sake of the compiler log, print out current dependencies:
 void Dependencies::log_all_dependencies() {
   if (log() == NULL)  return;
@@ -572,8 +599,7 @@
     code_byte -= ctxk_bit;
     DepType dept = (DepType)code_byte;
     _type = dept;
-    guarantee((dept - FIRST_TYPE) < (TYPE_LIMIT - FIRST_TYPE),
-              "bad dependency type tag");
+    Dependencies::check_valid_dependency_type(dept);
     int stride = _dep_args[dept];
     assert(stride == dep_args(dept), "sanity");
     int skipj = -1;
@@ -601,18 +627,35 @@
 
 klassOop Dependencies::DepStream::context_type() {
   assert(must_be_in_vm(), "raw oops here");
-  int ctxkj = dep_context_arg(_type);  // -1 if no context arg
-  if (ctxkj < 0) {
-    return NULL;           // for example, evol_method
-  } else {
-    oop k = recorded_oop_at(_xi[ctxkj]);
-    if (k != NULL) {       // context type was not compressed away
+
+  // Most dependencies have an explicit context type argument.
+  {
+    int ctxkj = dep_context_arg(_type);  // -1 if no explicit context arg
+    if (ctxkj >= 0) {
+      oop k = argument(ctxkj);
+      if (k != NULL) {       // context type was not compressed away
+        assert(k->is_klass(), "type check");
+        return (klassOop) k;
+      }
+      // recompute "default" context type
+      return ctxk_encoded_as_null(_type, argument(ctxkj+1));
+    }
+  }
+
+  // Some dependencies use the klass of the first object argument
+  // as their implicit context type (e.g. call_site_target_value).
+  {
+    int ctxkj = dep_implicit_context_arg(_type);
+    if (ctxkj >= 0) {
+      oop k = argument(ctxkj)->klass();
       assert(k->is_klass(), "type check");
       return (klassOop) k;
-    } else {               // recompute "default" context type
-      return ctxk_encoded_as_null(_type, recorded_oop_at(_xi[ctxkj+1]));
     }
   }
+
+  // And some dependencies don't have a context type at all,
+  // e.g. evol_method.
+  return NULL;
 }
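
As a rough illustration of the two context-type cases above (a standalone sketch with hypothetical Klass/Obj stand-ins, not HotSpot code): an explicit-context dependency records the context klass itself as argument 0, while an implicit-context one such as call_site_target_value takes the klass of its first recorded object.

  struct Klass { const char* name; };
  struct Obj   { Klass* klass; };

  // Explicit context: argument 0 already is the context klass.
  Klass* explicit_context(Klass* recorded_ctxk) { return recorded_ctxk; }

  // Implicit context: use the klass of the first recorded object argument.
  Klass* implicit_context(Obj* first_arg)       { return first_arg->klass; }
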
 
 /// Checking dependencies:
@@ -800,11 +843,11 @@
                                  bool participants_hide_witnesses,
                                  bool top_level_call = true);
   // the spot-checking version:
-  klassOop find_witness_in(DepChange& changes,
+  klassOop find_witness_in(KlassDepChange& changes,
                            klassOop context_type,
                            bool participants_hide_witnesses);
  public:
-  klassOop find_witness_subtype(klassOop context_type, DepChange* changes = NULL) {
+  klassOop find_witness_subtype(klassOop context_type, KlassDepChange* changes = NULL) {
     assert(doing_subtype_search(), "must set up a subtype search");
     // When looking for unexpected concrete types,
     // do not look beneath expected ones.
@@ -817,7 +860,7 @@
       return find_witness_anywhere(context_type, participants_hide_witnesses);
     }
   }
-  klassOop find_witness_definer(klassOop context_type, DepChange* changes = NULL) {
+  klassOop find_witness_definer(klassOop context_type, KlassDepChange* changes = NULL) {
     assert(!doing_subtype_search(), "must set up a method definer search");
     // When looking for unexpected concrete methods,
     // look beneath expected ones, to see if there are overrides.
@@ -878,7 +921,7 @@
 #endif //PRODUCT
 
 
-klassOop ClassHierarchyWalker::find_witness_in(DepChange& changes,
+klassOop ClassHierarchyWalker::find_witness_in(KlassDepChange& changes,
                                                klassOop context_type,
                                                bool participants_hide_witnesses) {
   assert(changes.involves_context(context_type), "irrelevant dependency");
@@ -1137,7 +1180,7 @@
 // when dealing with the types of actual instances.
 klassOop Dependencies::check_abstract_with_unique_concrete_subtype(klassOop ctxk,
                                                                    klassOop conck,
-                                                                   DepChange* changes) {
+                                                                   KlassDepChange* changes) {
   ClassHierarchyWalker wf(conck);
   return wf.find_witness_subtype(ctxk, changes);
 }
@@ -1146,7 +1189,7 @@
 // instantiatable.  This can allow the compiler to make some paths go
 // dead, if they are gated by a test of the type.
 klassOop Dependencies::check_abstract_with_no_concrete_subtype(klassOop ctxk,
-                                                               DepChange* changes) {
+                                                               KlassDepChange* changes) {
   // Find any concrete subtype, with no participants:
   ClassHierarchyWalker wf;
   return wf.find_witness_subtype(ctxk, changes);
@@ -1156,7 +1199,7 @@
 // If a concrete class has no concrete subtypes, it can always be
 // exactly typed.  This allows the use of a cheaper type test.
 klassOop Dependencies::check_concrete_with_no_concrete_subtype(klassOop ctxk,
-                                                               DepChange* changes) {
+                                                               KlassDepChange* changes) {
   // Find any concrete subtype, with only the ctxk as participant:
   ClassHierarchyWalker wf(ctxk);
   return wf.find_witness_subtype(ctxk, changes);
@@ -1217,7 +1260,7 @@
                                                 klassOop ctxk,
                                                 klassOop k1,
                                                 klassOop k2,
-                                                DepChange* changes) {
+                                                KlassDepChange* changes) {
   ClassHierarchyWalker wf;
   wf.add_participant(k1);
   wf.add_participant(k2);
@@ -1278,7 +1321,7 @@
 // If a class (or interface) has a unique concrete method uniqm, return NULL.
 // Otherwise, return a class that contains an interfering method.
 klassOop Dependencies::check_unique_concrete_method(klassOop ctxk, methodOop uniqm,
-                                                    DepChange* changes) {
+                                                    KlassDepChange* changes) {
   // Here is a missing optimization:  If uniqm->is_final(),
   // we don't really need to search beneath it for overrides.
   // This is probably not important, since we don't use dependencies
@@ -1321,7 +1364,7 @@
 klassOop Dependencies::check_exclusive_concrete_methods(klassOop ctxk,
                                                         methodOop m1,
                                                         methodOop m2,
-                                                        DepChange* changes) {
+                                                        KlassDepChange* changes) {
   ClassHierarchyWalker wf(m1);
   wf.add_participant(m1->method_holder());
   wf.add_participant(m2->method_holder());
@@ -1383,7 +1426,7 @@
 }
 
 
-klassOop Dependencies::check_has_no_finalizable_subclasses(klassOop ctxk, DepChange* changes) {
+klassOop Dependencies::check_has_no_finalizable_subclasses(klassOop ctxk, KlassDepChange* changes) {
   Klass* search_at = ctxk->klass_part();
   if (changes != NULL)
     search_at = changes->new_type()->klass_part(); // just look at the new bit
@@ -1395,8 +1438,38 @@
 }
 
 
-klassOop Dependencies::DepStream::check_dependency_impl(DepChange* changes) {
+klassOop Dependencies::check_call_site_target_value(oop call_site, oop method_handle, CallSiteDepChange* changes) {
+  assert(call_site    ->is_a(SystemDictionary::CallSite_klass()),     "sanity");
+  assert(method_handle->is_a(SystemDictionary::MethodHandle_klass()), "sanity");
+  if (changes == NULL) {
+    // Validate all CallSites
+    if (java_lang_invoke_CallSite::target(call_site) != method_handle)
+      return call_site->klass();  // assertion failed
+  } else {
+    // Validate the given CallSite
+    if (call_site == changes->call_site() && java_lang_invoke_CallSite::target(call_site) != changes->method_handle()) {
+      assert(method_handle != changes->method_handle(), "must be");
+      return call_site->klass();  // assertion failed
+    }
+  }
+  return NULL;  // assertion still valid
+}
+
+
+void Dependencies::DepStream::trace_and_log_witness(klassOop witness) {
+  if (witness != NULL) {
+    if (TraceDependencies) {
+      print_dependency(witness, /*verbose=*/ true);
+    }
+    // The following is a no-op unless logging is enabled:
+    log_dependency(witness);
+  }
+}
+
+
+klassOop Dependencies::DepStream::check_klass_dependency(KlassDepChange* changes) {
   assert_locked_or_safepoint(Compile_lock);
+  Dependencies::check_valid_dependency_type(type());
 
   klassOop witness = NULL;
   switch (type()) {
@@ -1407,95 +1480,103 @@
     witness = check_leaf_type(context_type());
     break;
   case abstract_with_unique_concrete_subtype:
-    witness = check_abstract_with_unique_concrete_subtype(context_type(),
-                                                          type_argument(1),
-                                                          changes);
+    witness = check_abstract_with_unique_concrete_subtype(context_type(), type_argument(1), changes);
     break;
   case abstract_with_no_concrete_subtype:
-    witness = check_abstract_with_no_concrete_subtype(context_type(),
-                                                      changes);
+    witness = check_abstract_with_no_concrete_subtype(context_type(), changes);
     break;
   case concrete_with_no_concrete_subtype:
-    witness = check_concrete_with_no_concrete_subtype(context_type(),
-                                                      changes);
+    witness = check_concrete_with_no_concrete_subtype(context_type(), changes);
     break;
   case unique_concrete_method:
-    witness = check_unique_concrete_method(context_type(),
-                                           method_argument(1),
-                                           changes);
+    witness = check_unique_concrete_method(context_type(), method_argument(1), changes);
     break;
   case abstract_with_exclusive_concrete_subtypes_2:
-    witness = check_abstract_with_exclusive_concrete_subtypes(context_type(),
-                                                              type_argument(1),
-                                                              type_argument(2),
-                                                              changes);
+    witness = check_abstract_with_exclusive_concrete_subtypes(context_type(), type_argument(1), type_argument(2), changes);
     break;
   case exclusive_concrete_methods_2:
-    witness = check_exclusive_concrete_methods(context_type(),
-                                               method_argument(1),
-                                               method_argument(2),
-                                               changes);
+    witness = check_exclusive_concrete_methods(context_type(), method_argument(1), method_argument(2), changes);
     break;
   case no_finalizable_subclasses:
-    witness = check_has_no_finalizable_subclasses(context_type(),
-                                                  changes);
+    witness = check_has_no_finalizable_subclasses(context_type(), changes);
     break;
-          default:
+  default:
     witness = NULL;
-    ShouldNotReachHere();
     break;
   }
-  if (witness != NULL) {
-    if (TraceDependencies) {
-      print_dependency(witness, /*verbose=*/ true);
-    }
-    // The following is a no-op unless logging is enabled:
-    log_dependency(witness);
+  trace_and_log_witness(witness);
+  return witness;
+}
+
+
+klassOop Dependencies::DepStream::check_call_site_dependency(CallSiteDepChange* changes) {
+  assert_locked_or_safepoint(Compile_lock);
+  Dependencies::check_valid_dependency_type(type());
+
+  klassOop witness = NULL;
+  switch (type()) {
+  case call_site_target_value:
+    witness = check_call_site_target_value(argument(0), argument(1), changes);
+    break;
+  default:
+    witness = NULL;
+    break;
   }
+  trace_and_log_witness(witness);
   return witness;
 }
 
 
 klassOop Dependencies::DepStream::spot_check_dependency_at(DepChange& changes) {
-  if (!changes.involves_context(context_type()))
-    // irrelevant dependency; skip it
-    return NULL;
+  // Handle klass dependency
+  if (changes.is_klass_change() && changes.as_klass_change()->involves_context(context_type()))
+    return check_klass_dependency(changes.as_klass_change());
 
-  return check_dependency_impl(&changes);
+  // Handle CallSite dependency
+  if (changes.is_call_site_change())
+    return check_call_site_dependency(changes.as_call_site_change());
+
+  // irrelevant dependency; skip it
+  return NULL;
 }
 
 
-void DepChange::initialize() {
-  // entire transaction must be under this lock:
-  assert_lock_strong(Compile_lock);
-
-  // Mark all dependee and all its superclasses
-  // Mark transitive interfaces
+void DepChange::print() {
+  int nsup = 0, nint = 0;
   for (ContextStream str(*this); str.next(); ) {
-    klassOop d = str.klass();
-    assert(!instanceKlass::cast(d)->is_marked_dependent(), "checking");
-    instanceKlass::cast(d)->set_is_marked_dependent(true);
+    klassOop k = str.klass();
+    switch (str.change_type()) {
+    case Change_new_type:
+      tty->print_cr("  dependee = %s", instanceKlass::cast(k)->external_name());
+      break;
+    case Change_new_sub:
+      if (!WizardMode) {
+        ++nsup;
+      } else {
+        tty->print_cr("  context super = %s", instanceKlass::cast(k)->external_name());
+      }
+      break;
+    case Change_new_impl:
+      if (!WizardMode) {
+        ++nint;
+      } else {
+        tty->print_cr("  context interface = %s", instanceKlass::cast(k)->external_name());
+      }
+      break;
+    }
+  }
+  if (nsup + nint != 0) {
+    tty->print_cr("  context supers = %d, interfaces = %d", nsup, nint);
   }
 }
 
-DepChange::~DepChange() {
-  // Unmark all dependee and all its superclasses
-  // Unmark transitive interfaces
-  for (ContextStream str(*this); str.next(); ) {
-    klassOop d = str.klass();
-    instanceKlass::cast(d)->set_is_marked_dependent(false);
-  }
-}
-
-bool DepChange::involves_context(klassOop k) {
-  if (k == NULL || !Klass::cast(k)->oop_is_instance()) {
-    return false;
-  }
-  instanceKlass* ik = instanceKlass::cast(k);
-  bool is_contained = ik->is_marked_dependent();
-  assert(is_contained == Klass::cast(new_type())->is_subtype_of(k),
-         "correct marking of potential context types");
-  return is_contained;
+void DepChange::ContextStream::start() {
+  klassOop new_type = _changes.is_klass_change() ? _changes.as_klass_change()->new_type() : (klassOop) NULL;
+  _change_type = (new_type == NULL ? NO_CHANGE : Start_Klass);
+  _klass = new_type;
+  _ti_base = NULL;
+  _ti_index = 0;
+  _ti_limit = 0;
 }
 
 bool DepChange::ContextStream::next() {
@@ -1534,35 +1615,39 @@
   return false;
 }
 
-void DepChange::print() {
-  int nsup = 0, nint = 0;
+void KlassDepChange::initialize() {
+  // entire transaction must be under this lock:
+  assert_lock_strong(Compile_lock);
+
+  // Mark all dependee and all its superclasses
+  // Mark transitive interfaces
   for (ContextStream str(*this); str.next(); ) {
-    klassOop k = str.klass();
-    switch (str.change_type()) {
-    case Change_new_type:
-      tty->print_cr("  dependee = %s", instanceKlass::cast(k)->external_name());
-      break;
-    case Change_new_sub:
-      if (!WizardMode) {
-        ++nsup;
-      } else {
-        tty->print_cr("  context super = %s", instanceKlass::cast(k)->external_name());
-      }
-      break;
-    case Change_new_impl:
-      if (!WizardMode) {
-        ++nint;
-      } else {
-        tty->print_cr("  context interface = %s", instanceKlass::cast(k)->external_name());
-      }
-      break;
-    }
+    klassOop d = str.klass();
+    assert(!instanceKlass::cast(d)->is_marked_dependent(), "checking");
+    instanceKlass::cast(d)->set_is_marked_dependent(true);
   }
-  if (nsup + nint != 0) {
-    tty->print_cr("  context supers = %d, interfaces = %d", nsup, nint);
+}
+
+KlassDepChange::~KlassDepChange() {
+  // Unmark all dependee and all its superclasses
+  // Unmark transitive interfaces
+  for (ContextStream str(*this); str.next(); ) {
+    klassOop d = str.klass();
+    instanceKlass::cast(d)->set_is_marked_dependent(false);
   }
 }
 
+bool KlassDepChange::involves_context(klassOop k) {
+  if (k == NULL || !Klass::cast(k)->oop_is_instance()) {
+    return false;
+  }
+  instanceKlass* ik = instanceKlass::cast(k);
+  bool is_contained = ik->is_marked_dependent();
+  assert(is_contained == Klass::cast(new_type())->is_subtype_of(k),
+         "correct marking of potential context types");
+  return is_contained;
+}
+
 #ifndef PRODUCT
 void Dependencies::print_statistics() {
   if (deps_find_witness_print != 0) {
--- a/src/share/vm/code/dependencies.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/code/dependencies.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,18 +25,21 @@
 #ifndef SHARE_VM_CODE_DEPENDENCIES_HPP
 #define SHARE_VM_CODE_DEPENDENCIES_HPP
 
+#include "ci/ciCallSite.hpp"
 #include "ci/ciKlass.hpp"
+#include "ci/ciMethodHandle.hpp"
+#include "classfile/systemDictionary.hpp"
 #include "code/compressedStream.hpp"
 #include "code/nmethod.hpp"
 #include "utilities/growableArray.hpp"
 
 //** Dependencies represent assertions (approximate invariants) within
-// the class hierarchy.  An example is an assertion that a given
-// method is not overridden; another example is that a type has only
-// one concrete subtype.  Compiled code which relies on such
-// assertions must be discarded if they are overturned by changes in
-// the class hierarchy.  We can think of these assertions as
-// approximate invariants, because we expect them to be overturned
+// the runtime system, e.g. class hierarchy changes.  An example is an
+// assertion that a given method is not overridden; another example is
+// that a type has only one concrete subtype.  Compiled code which
+// relies on such assertions must be discarded if they are overturned
+// by changes in the runtime system.  We can think of these assertions
+// as approximate invariants, because we expect them to be overturned
 // very infrequently.  We are willing to perform expensive recovery
 // operations when they are overturned.  The benefit, of course, is
 // performing optimistic optimizations (!) on the object code.
@@ -52,6 +55,8 @@
 class xmlStream;
 class CompileLog;
 class DepChange;
+class   KlassDepChange;
+class   CallSiteDepChange;
 class No_Safepoint_Verifier;
 
 class Dependencies: public ResourceObj {
@@ -152,15 +157,23 @@
     // subclasses require finalization registration.
     no_finalizable_subclasses,
 
+    // This dependency asserts that the CallSite.target value has not changed.
+    call_site_target_value,
+
     TYPE_LIMIT
   };
   enum {
     LG2_TYPE_LIMIT = 4,  // assert(TYPE_LIMIT <= (1<<LG2_TYPE_LIMIT))
 
     // handy categorizations of dependency types:
-    all_types      = ((1<<TYPE_LIMIT)-1) & ((-1)<<FIRST_TYPE),
-    non_ctxk_types = (1<<evol_method),
-    ctxk_types     = all_types & ~non_ctxk_types,
+    all_types           = ((1 << TYPE_LIMIT) - 1) & ((-1) << FIRST_TYPE),
+
+    non_klass_types     = (1 << call_site_target_value),
+    klass_types         = all_types & ~non_klass_types,
+
+    non_ctxk_types      = (1 << evol_method),
+    implicit_ctxk_types = (1 << call_site_target_value),
+    explicit_ctxk_types = all_types & ~(non_ctxk_types | implicit_ctxk_types),
 
     max_arg_count = 3,   // current maximum number of arguments (incl. ctxk)
 
@@ -176,9 +189,16 @@
 
   static const char* dep_name(DepType dept);
   static int         dep_args(DepType dept);
-  static int  dep_context_arg(DepType dept) {
-    return dept_in_mask(dept, ctxk_types)? 0: -1;
-  }
+
+  static bool is_klass_type(           DepType dept) { return dept_in_mask(dept, klass_types        ); }
+
+  static bool has_explicit_context_arg(DepType dept) { return dept_in_mask(dept, explicit_ctxk_types); }
+  static bool has_implicit_context_arg(DepType dept) { return dept_in_mask(dept, implicit_ctxk_types); }
+
+  static int           dep_context_arg(DepType dept) { return has_explicit_context_arg(dept) ? 0 : -1; }
+  static int  dep_implicit_context_arg(DepType dept) { return has_implicit_context_arg(dept) ? 0 : -1; }
+
+  static void check_valid_dependency_type(DepType dept);
 
  private:
   // State for writing a new set of dependencies:
@@ -241,8 +261,8 @@
   }
 
   void assert_common_1(DepType dept, ciObject* x);
-  void assert_common_2(DepType dept, ciKlass* ctxk, ciObject* x);
-  void assert_common_3(DepType dept, ciKlass* ctxk, ciObject* x, ciObject* x2);
+  void assert_common_2(DepType dept, ciObject* x0, ciObject* x1);
+  void assert_common_3(DepType dept, ciKlass* ctxk, ciObject* x1, ciObject* x2);
 
  public:
   // Adding assertions to a new dependency set at compile time:
@@ -255,6 +275,7 @@
   void assert_abstract_with_exclusive_concrete_subtypes(ciKlass* ctxk, ciKlass* k1, ciKlass* k2);
   void assert_exclusive_concrete_methods(ciKlass* ctxk, ciMethod* m1, ciMethod* m2);
   void assert_has_no_finalizable_subclasses(ciKlass* ctxk);
+  void assert_call_site_target_value(ciCallSite* call_site, ciMethodHandle* method_handle);
 
   // Define whether a given method or type is concrete.
   // These methods define the term "concrete" as used in this module.
@@ -296,19 +317,19 @@
   static klassOop check_evol_method(methodOop m);
   static klassOop check_leaf_type(klassOop ctxk);
   static klassOop check_abstract_with_unique_concrete_subtype(klassOop ctxk, klassOop conck,
-                                                              DepChange* changes = NULL);
+                                                              KlassDepChange* changes = NULL);
   static klassOop check_abstract_with_no_concrete_subtype(klassOop ctxk,
-                                                          DepChange* changes = NULL);
+                                                          KlassDepChange* changes = NULL);
   static klassOop check_concrete_with_no_concrete_subtype(klassOop ctxk,
-                                                          DepChange* changes = NULL);
+                                                          KlassDepChange* changes = NULL);
   static klassOop check_unique_concrete_method(klassOop ctxk, methodOop uniqm,
-                                               DepChange* changes = NULL);
+                                               KlassDepChange* changes = NULL);
   static klassOop check_abstract_with_exclusive_concrete_subtypes(klassOop ctxk, klassOop k1, klassOop k2,
-                                                                  DepChange* changes = NULL);
+                                                                  KlassDepChange* changes = NULL);
   static klassOop check_exclusive_concrete_methods(klassOop ctxk, methodOop m1, methodOop m2,
-                                                   DepChange* changes = NULL);
-  static klassOop check_has_no_finalizable_subclasses(klassOop ctxk,
-                                                      DepChange* changes = NULL);
+                                                   KlassDepChange* changes = NULL);
+  static klassOop check_has_no_finalizable_subclasses(klassOop ctxk, KlassDepChange* changes = NULL);
+  static klassOop check_call_site_target_value(oop call_site, oop method_handle, CallSiteDepChange* changes = NULL);
   // A returned klassOop is NULL if the dependency assertion is still
   // valid.  A non-NULL klassOop is a 'witness' to the assertion
   // failure, a point in the class hierarchy where the assertion has
@@ -415,7 +436,10 @@
     inline oop recorded_oop_at(int i);
         // => _code? _code->oop_at(i): *_deps->_oop_recorder->handle_at(i)
 
-    klassOop check_dependency_impl(DepChange* changes);
+    klassOop check_klass_dependency(KlassDepChange* changes);
+    klassOop check_call_site_dependency(CallSiteDepChange* changes);
+
+    void trace_and_log_witness(klassOop witness);
 
   public:
     DepStream(Dependencies* deps)
@@ -442,6 +466,8 @@
     oop argument(int i);         // => recorded_oop_at(argument_index(i))
     klassOop context_type();
 
+    bool is_klass_type()         { return Dependencies::is_klass_type(type()); }
+
     methodOop method_argument(int i) {
       oop x = argument(i);
       assert(x->is_method(), "type");
@@ -453,10 +479,13 @@
       return (klassOop) x;
     }
 
-    // The point of the whole exercise:  Is this dep is still OK?
+    // The point of the whole exercise:  Is this dep still OK?
     klassOop check_dependency() {
-      return check_dependency_impl(NULL);
+      klassOop result = check_klass_dependency(NULL);
+      if (result != NULL)  return result;
+      return check_call_site_dependency(NULL);
     }
+
     // A lighter version:  Checks only around recent changes in a class
     // hierarchy.  (See Universe::flush_dependents_on.)
     klassOop spot_check_dependency_at(DepChange& changes);
@@ -472,13 +501,27 @@
   static void print_statistics() PRODUCT_RETURN;
 };
 
-// A class hierarchy change coming through the VM (under the Compile_lock).
-// The change is structured as a single new type with any number of supers
-// and implemented interface types.  Other than the new type, any of the
-// super types can be context types for a relevant dependency, which the
-// new type could invalidate.
+
+// Every concrete DepChange is a subclass of this class.
 class DepChange : public StackObj {
  public:
+  // What kind of DepChange is this?
+  virtual bool is_klass_change()     const { return false; }
+  virtual bool is_call_site_change() const { return false; }
+
+  // Subclass casting with assertions.
+  KlassDepChange*    as_klass_change() {
+    assert(is_klass_change(), "bad cast");
+    return (KlassDepChange*) this;
+  }
+  CallSiteDepChange* as_call_site_change() {
+    assert(is_call_site_change(), "bad cast");
+    return (CallSiteDepChange*) this;
+  }
+
+  void print();
+
+ public:
   enum ChangeType {
     NO_CHANGE = 0,              // an uninvolved klass
     Change_new_type,            // a newly loaded type
@@ -488,28 +531,6 @@
     Start_Klass = CHANGE_LIMIT  // internal indicator for ContextStream
   };
 
- private:
-  // each change set is rooted in exactly one new type (at present):
-  KlassHandle _new_type;
-
-  void initialize();
-
- public:
-  // notes the new type, marks it and all its super-types
-  DepChange(KlassHandle new_type)
-    : _new_type(new_type)
-  {
-    initialize();
-  }
-
-  // cleans up the marks
-  ~DepChange();
-
-  klassOop new_type()                   { return _new_type(); }
-
-  // involves_context(k) is true if k is new_type or any of the super types
-  bool involves_context(klassOop k);
-
   // Usage:
   // for (DepChange::ContextStream str(changes); str.next(); ) {
   //   klassOop k = str.klass();
@@ -530,14 +551,7 @@
     int         _ti_limit;
 
     // start at the beginning:
-    void start() {
-      klassOop new_type = _changes.new_type();
-      _change_type = (new_type == NULL ? NO_CHANGE: Start_Klass);
-      _klass = new_type;
-      _ti_base = NULL;
-      _ti_index = 0;
-      _ti_limit = 0;
-    }
+    void start();
 
    public:
     ContextStream(DepChange& changes)
@@ -555,8 +569,62 @@
     klassOop   klass()           { return _klass; }
   };
   friend class DepChange::ContextStream;
+};
 
-  void print();
+
+// A class hierarchy change coming through the VM (under the Compile_lock).
+// The change is structured as a single new type with any number of supers
+// and implemented interface types.  Other than the new type, any of the
+// super types can be context types for a relevant dependency, which the
+// new type could invalidate.
+class KlassDepChange : public DepChange {
+ private:
+  // each change set is rooted in exactly one new type (at present):
+  KlassHandle _new_type;
+
+  void initialize();
+
+ public:
+  // notes the new type, marks it and all its super-types
+  KlassDepChange(KlassHandle new_type)
+    : _new_type(new_type)
+  {
+    initialize();
+  }
+
+  // cleans up the marks
+  ~KlassDepChange();
+
+  // What kind of DepChange is this?
+  virtual bool is_klass_change() const { return true; }
+
+  klassOop new_type() { return _new_type(); }
+
+  // involves_context(k) is true if k is new_type or any of the super types
+  bool involves_context(klassOop k);
+};
+
+
+// A CallSite has changed its target.
+class CallSiteDepChange : public DepChange {
+ private:
+  Handle _call_site;
+  Handle _method_handle;
+
+ public:
+  CallSiteDepChange(Handle call_site, Handle method_handle)
+    : _call_site(call_site),
+      _method_handle(method_handle)
+  {
+    assert(_call_site()    ->is_a(SystemDictionary::CallSite_klass()),     "must be");
+    assert(_method_handle()->is_a(SystemDictionary::MethodHandle_klass()), "must be");
+  }
+
+  // What kind of DepChange is this?
+  virtual bool is_call_site_change() const { return true; }
+
+  oop call_site()     const { return _call_site();     }
+  oop method_handle() const { return _method_handle(); }
 };
 
 #endif // SHARE_VM_CODE_DEPENDENCIES_HPP
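
A minimal standalone sketch (hypothetical names, not part of this changeset) of the dispatch pattern the new DepChange hierarchy enables: the change object reports its own kind, and a checker such as spot_check_dependency_at() routes to the matching validator or skips the dependency.

  #include <cstdio>

  struct Change {
    virtual ~Change() {}
    virtual bool is_klass_change()     const { return false; }
    virtual bool is_call_site_change() const { return false; }
  };

  struct KlassChange    : Change { bool is_klass_change()     const { return true; } };
  struct CallSiteChange : Change { bool is_call_site_change() const { return true; } };

  const char* spot_check(const Change& c) {
    if (c.is_klass_change())     return "check klass dependency";
    if (c.is_call_site_change()) return "check call-site dependency";
    return "irrelevant dependency; skip it";
  }

  int main() {
    KlassChange kc;
    CallSiteChange cc;
    std::printf("%s\n%s\n", spot_check(kc), spot_check(cc));
    return 0;
  }
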
--- a/src/share/vm/code/nmethod.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/code/nmethod.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "code/codeCache.hpp"
 #include "code/compiledIC.hpp"
+#include "code/dependencies.hpp"
 #include "code/nmethod.hpp"
 #include "code/scopeDesc.hpp"
 #include "compiler/abstractCompiler.hpp"
@@ -450,7 +451,6 @@
   _stack_traversal_mark       = 0;
   _unload_reported            = false;           // jvmti state
 
-  NOT_PRODUCT(_has_debug_info = false);
 #ifdef ASSERT
   _oops_are_stale             = false;
 #endif
--- a/src/share/vm/code/nmethod.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/code/nmethod.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -191,8 +191,6 @@
 
   jbyte _scavenge_root_state;
 
-  NOT_PRODUCT(bool _has_debug_info; )
-
   // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
   // and is not made into a zombie. However, once the nmethod is made into
   // a zombie, it will be locked one final time if CompiledMethodUnload
@@ -329,11 +327,6 @@
   methodOop method() const                        { return _method; }
   AbstractCompiler* compiler() const              { return _compiler; }
 
-#ifndef PRODUCT
-  bool has_debug_info() const                     { return _has_debug_info; }
-  void set_has_debug_info(bool f)                 { _has_debug_info = false; }
-#endif // NOT PRODUCT
-
   // type info
   bool is_nmethod() const                         { return true; }
   bool is_java_method() const                     { return !method()->is_native(); }
--- a/src/share/vm/code/pcDesc.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/code/pcDesc.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -30,11 +30,10 @@
 #include "memory/resourceArea.hpp"
 
 PcDesc::PcDesc(int pc_offset, int scope_decode_offset, int obj_decode_offset) {
-  assert(sizeof(PcDescFlags) <= 4, "occupies more than a word");
   _pc_offset           = pc_offset;
   _scope_decode_offset = scope_decode_offset;
   _obj_decode_offset   = obj_decode_offset;
-  _flags.word          = 0;
+  _flags               = 0;
 }
 
 address PcDesc::real_pc(const nmethod* code) const {
@@ -44,7 +43,7 @@
 void PcDesc::print(nmethod* code) {
 #ifndef PRODUCT
   ResourceMark rm;
-  tty->print_cr("PcDesc(pc=0x%lx offset=%x bits=%x):", real_pc(code), pc_offset(), _flags.bits);
+  tty->print_cr("PcDesc(pc=0x%lx offset=%x bits=%x):", real_pc(code), pc_offset(), _flags);
 
   if (scope_decode_offset() == DebugInformationRecorder::serialized_null) {
     return;
--- a/src/share/vm/code/pcDesc.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/code/pcDesc.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -39,15 +39,17 @@
   int _scope_decode_offset; // offset for scope in nmethod
   int _obj_decode_offset;
 
-  union PcDescFlags {
-    int word;
-    struct {
-      unsigned int reexecute: 1;
-      unsigned int is_method_handle_invoke: 1;
-      unsigned int return_oop: 1;
-    } bits;
-    bool operator ==(const PcDescFlags& other) { return word == other.word; }
-  } _flags;
+  enum {
+    PCDESC_reexecute               = 1 << 0,
+    PCDESC_is_method_handle_invoke = 1 << 1,
+    PCDESC_return_oop              = 1 << 2
+  };
+
+  int _flags;
+
+  void set_flag(int mask, bool z) {
+    _flags = z ? (_flags | mask) : (_flags & ~mask);
+  }
 
  public:
   int pc_offset() const           { return _pc_offset;   }
@@ -69,8 +71,8 @@
   };
 
   // Flags
-  bool     should_reexecute()              const { return _flags.bits.reexecute; }
-  void set_should_reexecute(bool z)              { _flags.bits.reexecute = z;    }
+  bool     should_reexecute()              const { return (_flags & PCDESC_reexecute) != 0; }
+  void set_should_reexecute(bool z)              { set_flag(PCDESC_reexecute, z); }
 
   // Does pd refer to the same information as this PcDesc?
   bool is_same_info(const PcDesc* pd) {
@@ -79,11 +81,11 @@
       _flags == pd->_flags;
   }
 
-  bool     is_method_handle_invoke()       const { return _flags.bits.is_method_handle_invoke;     }
-  void set_is_method_handle_invoke(bool z)       {        _flags.bits.is_method_handle_invoke = z; }
+  bool     is_method_handle_invoke()       const { return (_flags & PCDESC_is_method_handle_invoke) != 0;     }
+  void set_is_method_handle_invoke(bool z)       { set_flag(PCDESC_is_method_handle_invoke, z); }
 
-  bool     return_oop()                    const { return _flags.bits.return_oop;     }
-  void set_return_oop(bool z)                    {        _flags.bits.return_oop = z; }
+  bool     return_oop()                    const { return (_flags & PCDESC_return_oop) != 0;     }
+  void set_return_oop(bool z)                    { set_flag(PCDESC_return_oop, z); }
 
   // Returns the real pc
   address real_pc(const nmethod* code) const;
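
A standalone sketch (hypothetical flag names, not part of this changeset) of the encoding PcDesc switches to above: a single int plus bit masks replaces the union of bitfields, so flags are set and cleared by masking and two flag words compare with plain ==.

  #include <cassert>

  enum {
    FLAG_reexecute  = 1 << 0,
    FLAG_mh_invoke  = 1 << 1,
    FLAG_return_oop = 1 << 2
  };

  struct Flags {
    int _flags;
    Flags() : _flags(0) {}
    void set(int mask, bool z) { _flags = z ? (_flags | mask) : (_flags & ~mask); }
    bool has(int mask) const   { return (_flags & mask) != 0; }
  };

  int main() {
    Flags a, b;
    a.set(FLAG_return_oop, true);
    b.set(FLAG_return_oop, true);
    assert(a.has(FLAG_return_oop) && !a.has(FLAG_reexecute));
    assert(a._flags == b._flags);      // plain integer comparison now suffices
    a.set(FLAG_return_oop, false);
    assert(!a.has(FLAG_return_oop));
    return 0;
  }
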
--- a/src/share/vm/compiler/compileBroker.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/compiler/compileBroker.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -37,6 +37,8 @@
 // An entry in the compile queue.  It represents a pending or current
 // compilation.
 class CompileTask : public CHeapObj {
+  friend class VMStructs;
+
  private:
   Monitor*     _lock;
   uint         _compile_id;
--- a/src/share/vm/compiler/disassembler.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/compiler/disassembler.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -78,21 +78,46 @@
   char buf[JVM_MAXPATHLEN];
   os::jvm_path(buf, sizeof(buf));
   int jvm_offset = -1;
+  int lib_offset = -1;
   {
     // Match "jvm[^/]*" in jvm_path.
     const char* base = buf;
     const char* p = strrchr(buf, '/');
+    if (p != NULL) lib_offset = p - base + 1;
     p = strstr(p ? p : base, "jvm");
     if (p != NULL)  jvm_offset = p - base;
   }
+  // Find the disassembler shared library.
+  // Search for several paths derived from libjvm, in this order:
+  // 1. <home>/jre/lib/<arch>/<vm>/libhsdis-<arch>.so  (for compatibility)
+  // 2. <home>/jre/lib/<arch>/<vm>/hsdis-<arch>.so
+  // 3. <home>/jre/lib/<arch>/hsdis-<arch>.so
+  // 4. hsdis-<arch>.so  (using LD_LIBRARY_PATH)
   if (jvm_offset >= 0) {
-    // Find the disassembler next to libjvm.so.
+    // 1. <home>/jre/lib/<arch>/<vm>/libhsdis-<arch>.so
     strcpy(&buf[jvm_offset], hsdis_library_name);
     strcat(&buf[jvm_offset], os::dll_file_extension());
     _library = os::dll_load(buf, ebuf, sizeof ebuf);
+    if (_library == NULL) {
+      // 2. <home>/jre/lib/<arch>/<vm>/hsdis-<arch>.so
+      strcpy(&buf[lib_offset], hsdis_library_name);
+      strcat(&buf[lib_offset], os::dll_file_extension());
+      _library = os::dll_load(buf, ebuf, sizeof ebuf);
+    }
+    if (_library == NULL) {
+      // 3. <home>/jre/lib/<arch>/hsdis-<arch>.so
+      buf[lib_offset - 1] = '\0';
+      const char* p = strrchr(buf, '/');
+      if (p != NULL) {
+        lib_offset = p - buf + 1;
+        strcpy(&buf[lib_offset], hsdis_library_name);
+        strcat(&buf[lib_offset], os::dll_file_extension());
+        _library = os::dll_load(buf, ebuf, sizeof ebuf);
+      }
+    }
   }
   if (_library == NULL) {
-    // Try a free-floating lookup.
+    // 4. hsdis-<arch>.so  (using LD_LIBRARY_PATH)
     strcpy(&buf[0], hsdis_library_name);
     strcat(&buf[0], os::dll_file_extension());
     _library = os::dll_load(buf, ebuf, sizeof ebuf);
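
A standalone sketch (the libjvm path and amd64 arch are assumed purely for illustration; not part of this changeset) of how the four candidate hsdis locations listed above can be derived from the libjvm path by trimming trailing path components.

  #include <cstdio>
  #include <string>

  int main() {
    std::string jvm    = "/opt/jdk/jre/lib/amd64/server/libjvm.so";                  // assumed example path
    std::string vmdir  = jvm.substr(0, jvm.rfind('/') + 1);                          // .../amd64/server/
    std::string libdir = vmdir.substr(0, vmdir.rfind('/', vmdir.size() - 2) + 1);    // .../amd64/

    std::printf("1. %slibhsdis-amd64.so\n", vmdir.c_str());   // next to libjvm, lib-prefixed
    std::printf("2. %shsdis-amd64.so\n",    vmdir.c_str());   // next to libjvm
    std::printf("3. %shsdis-amd64.so\n",    libdir.c_str());  // one directory up
    std::printf("4. hsdis-amd64.so\n");                       // bare name, via LD_LIBRARY_PATH
    return 0;
  }
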
@@ -249,7 +274,13 @@
       return arg;
     }
   } else if (match(event, "mach")) {
-   output()->print_cr("[Disassembling for mach='%s']", arg);
+    static char buffer[32] = { 0, };
+    if (strcmp(buffer, (const char*)arg) != 0 ||
+        strlen((const char*)arg) > sizeof(buffer) - 1) {
+      // Only print this when the mach changes
+      strncpy(buffer, (const char*)arg, sizeof(buffer) - 1);
+      output()->print_cr("[Disassembling for mach='%s']", arg);
+    }
   } else if (match(event, "format bytes-per-line")) {
     _bytes_per_line = (int) (intptr_t) arg;
   } else {
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1833,8 +1833,6 @@
     }
   )
   _indexedFreeList[size].removeChunk(fc);
-  debug_only(fc->clearNext());
-  debug_only(fc->clearPrev());
   NOT_PRODUCT(
     if (FLSVerifyIndexTable) {
       verifyIndexedFreeList(size);
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -2004,7 +2004,7 @@
   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
 
   ref_processor()->set_enqueuing_is_done(false);
-  ref_processor()->enable_discovery();
+  ref_processor()->enable_discovery(false /*verify_disabled*/, false /*check_no_refs*/);
   ref_processor()->setup_policy(clear_all_soft_refs);
   // If an asynchronous collection finishes, the _modUnionTable is
   // all clear.  If we are assuming the collection from an asynchronous
@@ -2025,9 +2025,6 @@
                                             _intra_sweep_estimate.padded_average());
   }
 
-  {
-    TraceCMSMemoryManagerStats tmms(gch->gc_cause());
-  }
   GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
     ref_processor(), clear_all_soft_refs);
   #ifdef ASSERT
@@ -2716,6 +2713,10 @@
   bitMapLock()->unlock();
   releaseFreelistLocks();
 
+  if (!CleanChunkPoolAsync) {
+    Chunk::clean_chunk_pool();
+  }
+
   _between_prologue_and_epilogue = false;  // ready for next cycle
 }
 
@@ -3489,8 +3490,8 @@
     MutexLockerEx x(bitMapLock(),
                     Mutex::_no_safepoint_check_flag);
     checkpointRootsInitialWork(asynch);
-    rp->verify_no_references_recorded();
-    rp->enable_discovery(); // enable ("weak") refs discovery
+    // enable ("weak") refs discovery
+    rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
     _collectorState = Marking;
   } else {
     // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
@@ -3502,7 +3503,8 @@
            "ref discovery for this generation kind");
     // already have locks
     checkpointRootsInitialWork(asynch);
-    rp->enable_discovery(); // now enable ("weak") refs discovery
+    // now enable ("weak") refs discovery
+    rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
     _collectorState = Marking;
   }
   SpecializationStats::print();
@@ -9341,15 +9343,3 @@
   }
 }
 
-// when bailing out of cms in concurrent mode failure
-TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(GCCause::Cause cause): TraceMemoryManagerStats() {
-  initialize(true /* fullGC */ ,
-             cause /* cause of the GC */,
-             true /* recordGCBeginTime */,
-             true /* recordPreGCUsage */,
-             true /* recordPeakUsage */,
-             true /* recordPostGCusage */,
-             true /* recordAccumulatedGCTime */,
-             true /* recordGCEndTime */,
-             true /* countCollection */ );
-}
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1900,7 +1900,6 @@
 
  public:
   TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause);
-  TraceCMSMemoryManagerStats(GCCause::Cause cause);
 };
 
 
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -114,17 +114,11 @@
     linkNext(ptr);
     if (ptr != NULL) ptr->linkPrev(this);
   }
-  void linkAfterNonNull(FreeChunk* ptr) {
-    assert(ptr != NULL, "precondition violation");
-    linkNext(ptr);
-    ptr->linkPrev(this);
-  }
   void linkNext(FreeChunk* ptr) { _next = ptr; }
   void linkPrev(FreeChunk* ptr) {
     LP64_ONLY(if (UseCompressedOops) _prev = ptr; else)
     _prev = (FreeChunk*)((intptr_t)ptr | 0x1);
   }
-  void clearPrev()              { _prev = NULL; }
   void clearNext()              { _next = NULL; }
   void markNotFree() {
     // Set _prev (klass) to null before (if) clearing the mark word below
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -300,8 +300,21 @@
   // dictionary for example, this might be the first block and
   // in that case there would be no place that we could record
   // the stats (which are kept in the block itself).
-  assert(_allocation_stats.prevSweep() + _allocation_stats.splitBirths() + 1   // Total Stock + 1
-          >= _allocation_stats.splitDeaths() + (ssize_t)count(), "Conservation Principle");
+  assert((_allocation_stats.prevSweep() + _allocation_stats.splitBirths()
+          + _allocation_stats.coalBirths() + 1)   // Total Production Stock + 1
+         >= (_allocation_stats.splitDeaths() + _allocation_stats.coalDeaths()
+             + (ssize_t)count()),                // Total Current Stock + depletion
+         err_msg("FreeList " PTR_FORMAT " of size " SIZE_FORMAT
+                 " violates Conservation Principle: "
+                 "prevSweep(" SIZE_FORMAT ")"
+                 " + splitBirths(" SIZE_FORMAT ")"
+                 " + coalBirths(" SIZE_FORMAT ") + 1 >= "
+                 " splitDeaths(" SIZE_FORMAT ")"
+                 " + coalDeaths(" SIZE_FORMAT ")"
+                 " + count(" SSIZE_FORMAT ")",
+                 this, _size, _allocation_stats.prevSweep(), _allocation_stats.splitBirths(),
+                 _allocation_stats.coalBirths(), _allocation_stats.splitDeaths(),
+                 _allocation_stats.coalDeaths(), count()));
 }
 
 void FreeList::assert_proper_lock_protection_work() const {
--- a/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
 #include "gc_implementation/g1/collectionSetChooser.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/g1ErgoVerbose.hpp"
 #include "memory/space.inline.hpp"
 
 CSetChooserCache::CSetChooserCache() {
@@ -358,6 +359,9 @@
   if (_cache.is_empty()) {
     assert(_curMarkedIndex == _numMarkedRegions,
            "if cache is empty, list should also be empty");
+    ergo_verbose0(ErgoCSetConstruction,
+                  "stop adding old regions to CSet",
+                  ergo_format_reason("cache is empty"));
     return NULL;
   }
 
@@ -368,10 +372,23 @@
   if (g1p->adaptive_young_list_length()) {
     if (time_remaining - predicted_time < 0.0) {
       g1h->check_if_region_is_too_expensive(predicted_time);
+      ergo_verbose2(ErgoCSetConstruction,
+                    "stop adding old regions to CSet",
+                    ergo_format_reason("predicted old region time higher than remaining time")
+                    ergo_format_ms("predicted old region time")
+                    ergo_format_ms("remaining time"),
+                    predicted_time, time_remaining);
       return NULL;
     }
   } else {
-    if (predicted_time > 2.0 * avg_prediction) {
+    double threshold = 2.0 * avg_prediction;
+    if (predicted_time > threshold) {
+      ergo_verbose2(ErgoCSetConstruction,
+                    "stop adding old regions to CSet",
+                    ergo_format_reason("predicted old region time higher than threshold")
+                    ergo_format_ms("predicted old region time")
+                    ergo_format_ms("threshold"),
+                    predicted_time, threshold);
       return NULL;
     }
   }
--- a/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -91,7 +91,7 @@
       }
     }
 
-    g1p->check_prediction_validity();
+    g1p->revise_young_list_target_length_if_necessary();
   }
 }
 
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -24,10 +24,12 @@
 
 #include "precompiled.hpp"
 #include "classfile/symbolTable.hpp"
-#include "gc_implementation/g1/concurrentMark.hpp"
+#include "gc_implementation/g1/concurrentMark.inline.hpp"
 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/g1ErgoVerbose.hpp"
+#include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
@@ -69,7 +71,9 @@
   addr = (HeapWord*)align_size_up((intptr_t)addr,
                                   HeapWordSize << _shifter);
   size_t addrOffset = heapWordToOffset(addr);
-  if (limit == NULL) limit = _bmStartWord + _bmWordSize;
+  if (limit == NULL) {
+    limit = _bmStartWord + _bmWordSize;
+  }
   size_t limitOffset = heapWordToOffset(limit);
   size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
   HeapWord* nextAddr = offsetToHeapWord(nextOffset);
@@ -82,7 +86,9 @@
 HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
                                                  HeapWord* limit) const {
   size_t addrOffset = heapWordToOffset(addr);
-  if (limit == NULL) limit = _bmStartWord + _bmWordSize;
+  if (limit == NULL) {
+    limit = _bmStartWord + _bmWordSize;
+  }
   size_t limitOffset = heapWordToOffset(limit);
   size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
   HeapWord* nextAddr = offsetToHeapWord(nextOffset);
@@ -176,18 +182,20 @@
 
 void CMMarkStack::allocate(size_t size) {
   _base = NEW_C_HEAP_ARRAY(oop, size);
-  if (_base == NULL)
+  if (_base == NULL) {
     vm_exit_during_initialization("Failed to allocate "
                                   "CM region mark stack");
+  }
   _index = 0;
-  // QQQQ cast ...
   _capacity = (jint) size;
   _oops_do_bound = -1;
   NOT_PRODUCT(_max_depth = 0);
 }
 
 CMMarkStack::~CMMarkStack() {
-  if (_base != NULL) FREE_C_HEAP_ARRAY(oop, _base);
+  if (_base != NULL) {
+    FREE_C_HEAP_ARRAY(oop, _base);
+  }
 }
 
 void CMMarkStack::par_push(oop ptr) {
@@ -280,16 +288,17 @@
 
 void CMRegionStack::allocate(size_t size) {
   _base = NEW_C_HEAP_ARRAY(MemRegion, size);
-  if (_base == NULL)
-    vm_exit_during_initialization("Failed to allocate "
-                                  "CM region mark stack");
+  if (_base == NULL) {
+    vm_exit_during_initialization("Failed to allocate CM region mark stack");
+  }
   _index = 0;
-  // QQQQ cast ...
   _capacity = (jint) size;
 }
 
 CMRegionStack::~CMRegionStack() {
-  if (_base != NULL) FREE_C_HEAP_ARRAY(oop, _base);
+  if (_base != NULL) {
+    FREE_C_HEAP_ARRAY(oop, _base);
+  }
 }
 
 void CMRegionStack::push_lock_free(MemRegion mr) {
@@ -421,7 +430,8 @@
     // the ones in CMS generation.
     newOop->oop_iterate(cl);
     if (yield_after && _cm->do_yield_check()) {
-      res = false; break;
+      res = false;
+      break;
     }
   }
   debug_only(_drain_in_progress = false);
@@ -492,19 +502,20 @@
   _total_counting_time(0.0),
   _total_rs_scrub_time(0.0),
 
-  _parallel_workers(NULL)
-{
-  CMVerboseLevel verbose_level =
-    (CMVerboseLevel) G1MarkingVerboseLevel;
-  if (verbose_level < no_verbose)
+  _parallel_workers(NULL) {
+  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
+  if (verbose_level < no_verbose) {
     verbose_level = no_verbose;
-  if (verbose_level > high_verbose)
+  }
+  if (verbose_level > high_verbose) {
     verbose_level = high_verbose;
+  }
   _verbose_level = verbose_level;
 
-  if (verbose_low())
+  if (verbose_low()) {
     gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                            "heap end = "PTR_FORMAT, _heap_start, _heap_end);
+  }
 
   _markStack.allocate(MarkStackSize);
   _regionStack.allocate(G1MarkRegionStackSize);
@@ -580,10 +591,11 @@
       _marking_task_overhead    = 1.0;
     }
 
-    if (parallel_marking_threads() > 1)
+    if (parallel_marking_threads() > 1) {
       _cleanup_task_overhead = 1.0;
-    else
+    } else {
       _cleanup_task_overhead = marking_task_overhead();
+    }
     _cleanup_sleep_factor =
                      (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();
 
@@ -621,8 +633,7 @@
   // at the beginning of remark to be false. By ensuring that we do
   // not observe heap expansions after marking is complete, then we do
   // not have this problem.
-  if (!concurrent_marking_in_progress() && !force)
-    return;
+  if (!concurrent_marking_in_progress() && !force) return;
 
   MemRegion committed = _g1h->g1_committed();
   assert(committed.start() == _heap_start, "start shouldn't change");
@@ -655,8 +666,9 @@
   // reset all the marking data structures and any necessary flags
   clear_marking_state();
 
-  if (verbose_low())
+  if (verbose_low()) {
     gclog_or_tty->print_cr("[global] resetting");
+  }
 
   // We do reset all of them, since different phases will use
   // different number of active threads. So, it's easiest to have all
@@ -742,8 +754,9 @@
   size_t chunkSize = M;
   while (cur < end) {
     HeapWord* next = cur + chunkSize;
-    if (next > end)
+    if (next > end) {
       next = end;
+    }
     MemRegion mr(cur,next);
     _nextMarkBitMap->clearRange(mr);
     cur = next;
@@ -781,7 +794,7 @@
 #ifndef PRODUCT
   if (G1PrintReachableAtInitialMark) {
     print_reachable("at-cycle-start",
-                    true /* use_prev_marking */, true /* all */);
+                    VerifyOption_G1UsePrevMarking, true /* all */);
   }
 #endif
 
@@ -789,39 +802,6 @@
   reset();
 }
 
-class CMMarkRootsClosure: public OopsInGenClosure {
-private:
-  ConcurrentMark*  _cm;
-  G1CollectedHeap* _g1h;
-  bool             _do_barrier;
-
-public:
-  CMMarkRootsClosure(ConcurrentMark* cm,
-                     G1CollectedHeap* g1h,
-                     bool do_barrier) : _cm(cm), _g1h(g1h),
-                                        _do_barrier(do_barrier) { }
-
-  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-  virtual void do_oop(      oop* p) { do_oop_work(p); }
-
-  template <class T> void do_oop_work(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-      assert(obj->is_oop() || obj->mark() == NULL,
-             "expected an oop, possibly with mark word displaced");
-      HeapWord* addr = (HeapWord*)obj;
-      if (_g1h->is_in_g1_reserved(addr)) {
-        _cm->grayRoot(obj);
-      }
-    }
-    if (_do_barrier) {
-      assert(!_g1h->is_in_g1_reserved(p),
-             "Should be called on external roots");
-      do_barrier(p);
-    }
-  }
-};
 
 void ConcurrentMark::checkpointRootsInitialPost() {
   G1CollectedHeap*   g1h = G1CollectedHeap::heap();
@@ -838,10 +818,10 @@
   NoteStartOfMarkHRClosure startcl;
   g1h->heap_region_iterate(&startcl);
 
-  // Start weak-reference discovery.
-  ReferenceProcessor* rp = g1h->ref_processor();
-  rp->verify_no_references_recorded();
-  rp->enable_discovery(); // enable ("weak") refs discovery
+  // Start Concurrent Marking weak-reference discovery.
+  ReferenceProcessor* rp = g1h->ref_processor_cm();
+  // enable ("weak") refs discovery
+  rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
@@ -856,50 +836,6 @@
   // during it. No need to call it here.
 }
 
-// Checkpoint the roots into this generation from outside
-// this generation. [Note this initial checkpoint need only
-// be approximate -- we'll do a catch up phase subsequently.]
-void ConcurrentMark::checkpointRootsInitial() {
-  assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
-  double start = os::elapsedTime();
-
-  G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
-  g1p->record_concurrent_mark_init_start();
-  checkpointRootsInitialPre();
-
-  // YSR: when concurrent precleaning is in place, we'll
-  // need to clear the cached card table here
-
-  ResourceMark rm;
-  HandleMark  hm;
-
-  g1h->ensure_parsability(false);
-  g1h->perm_gen()->save_marks();
-
-  CMMarkRootsClosure notOlder(this, g1h, false);
-  CMMarkRootsClosure older(this, g1h, true);
-
-  g1h->set_marking_started();
-  g1h->rem_set()->prepare_for_younger_refs_iterate(false);
-
-  g1h->process_strong_roots(true,    // activate StrongRootsScope
-                            false,   // fake perm gen collection
-                            SharedHeap::SO_AllClasses,
-                            &notOlder, // Regular roots
-                            NULL,     // do not visit active blobs
-                            &older    // Perm Gen Roots
-                            );
-  checkpointRootsInitialPost();
-
-  // Statistics.
-  double end = os::elapsedTime();
-  _init_times.add((end - start) * 1000.0);
-
-  g1p->record_concurrent_mark_init_end();
-}
-
 /*
  * Notice that in the next two methods, we actually leave the STS
  * during the barrier sync and join it immediately afterwards. If we
@@ -922,8 +858,9 @@
  */
 
 void ConcurrentMark::enter_first_sync_barrier(int task_num) {
-  if (verbose_low())
+  if (verbose_low()) {
     gclog_or_tty->print_cr("[%d] entering first barrier", task_num);
+  }
 
   if (concurrent()) {
     ConcurrentGCThread::stsLeave();
@@ -935,8 +872,9 @@
   // at this point everyone should have synced up and not be doing any
   // more work
 
-  if (verbose_low())
+  if (verbose_low()) {
     gclog_or_tty->print_cr("[%d] leaving first barrier", task_num);
+  }
 
   // let task 0 do this
   if (task_num == 0) {
@@ -960,8 +898,9 @@
 }
 
 void ConcurrentMark::enter_second_sync_barrier(int task_num) {
-  if (verbose_low())
+  if (verbose_low()) {
     gclog_or_tty->print_cr("[%d] entering second barrier", task_num);
+  }
 
   if (concurrent()) {
     ConcurrentGCThread::stsLeave();
@@ -972,8 +911,9 @@
   }
   // at this point everything should be re-initialised and ready to go
 
-  if (verbose_low())
+  if (verbose_low()) {
     gclog_or_tty->print_cr("[%d] leaving second barrier", task_num);
+  }
 }
 
 #ifndef PRODUCT
@@ -1012,8 +952,9 @@
   assert(_g1h->g1_committed().contains(addr),
          "address should be within the heap bounds");
 
-  if (!_nextMarkBitMap->isMarked(addr))
+  if (!_nextMarkBitMap->isMarked(addr)) {
     _nextMarkBitMap->parMark(addr);
+  }
 }
 
 void ConcurrentMark::grayRegionIfNecessary(MemRegion mr) {
@@ -1021,17 +962,19 @@
   // the caller. We only need to decide whether to push the region on
   // the region stack or not.
 
-  if (!concurrent_marking_in_progress() || !_should_gray_objects)
+  if (!concurrent_marking_in_progress() || !_should_gray_objects) {
     // We're done with marking and waiting for remark. We do not need to
     // push anything else on the region stack.
     return;
+  }
 
   HeapWord* finger = _finger;
 
-  if (verbose_low())
+  if (verbose_low()) {
     gclog_or_tty->print_cr("[global] attempting to push "
                            "region ["PTR_FORMAT", "PTR_FORMAT"), finger is at "
                            PTR_FORMAT, mr.start(), mr.end(), finger);
+  }
 
   if (mr.start() < finger) {
     // The finger is always heap region aligned and it is not possible
@@ -1045,14 +988,16 @@
            "region boundaries should fall within the committed space");
     assert(mr.end() <= _heap_end,
            "region boundaries should fall within the committed space");
-    if (verbose_low())
+    if (verbose_low()) {
       gclog_or_tty->print_cr("[global] region ["PTR_FORMAT", "PTR_FORMAT") "
                              "below the finger, pushing it",
                              mr.start(), mr.end());
+    }
 
     if (!region_stack_push_lock_free(mr)) {
-      if (verbose_low())
+      if (verbose_low()) {
         gclog_or_tty->print_cr("[global] region stack has overflown.");
+      }
     }
   }
 }
@@ -1066,10 +1011,11 @@
     // We definitely need to mark it, irrespective whether we bail out
     // because we're done with marking.
     if (_nextMarkBitMap->parMark(addr)) {
-      if (!concurrent_marking_in_progress() || !_should_gray_objects)
+      if (!concurrent_marking_in_progress() || !_should_gray_objects) {
         // If we're done with concurrent marking and we're waiting for
         // remark, then we're not pushing anything on the stack.
         return;
+      }
 
       // No OrderAccess:store_load() is needed. It is implicit in the
       // CAS done in parMark(addr) above
@@ -1077,9 +1023,10 @@
 
       if (addr < finger) {
         if (!mark_stack_push(oop(addr))) {
-          if (verbose_low())
+          if (verbose_low()) {
             gclog_or_tty->print_cr("[global] global stack overflow "
                                    "during parMark");
+          }
         }
       }
     }
@@ -1174,10 +1121,11 @@
   set_phase(active_workers, true /* concurrent */);
 
   CMConcurrentMarkingTask markingTask(this, cmThread());
-  if (parallel_marking_threads() > 0)
+  if (parallel_marking_threads() > 0) {
     _parallel_workers->run_task(&markingTask);
-  else
+  } else {
     markingTask.work(0);
+  }
   print_stats();
 }
 
@@ -1185,6 +1133,7 @@
   // world is stopped at this checkpoint
   assert(SafepointSynchronize::is_at_safepoint(),
          "world should be stopped");
+
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
   // If a full collection has happened, we shouldn't do this.
@@ -1199,7 +1148,9 @@
     HandleMark hm;  // handle scope
     gclog_or_tty->print(" VerifyDuringGC:(before)");
     Universe::heap()->prepare_for_verify();
-    Universe::verify(true, false, true);
+    Universe::verify(/* allow dirty */ true,
+                     /* silent      */ false,
+                     /* option      */ VerifyOption_G1UsePrevMarking);
   }
 
   G1CollectorPolicy* g1p = g1h->g1_policy();
@@ -1218,8 +1169,9 @@
     _restart_for_overflow = true;
     // Clear the flag. We do not need it any more.
     clear_has_overflown();
-    if (G1TraceMarkStackOverflow)
+    if (G1TraceMarkStackOverflow) {
       gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
+    }
   } else {
     SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
     // We're done with marking.
@@ -1232,9 +1184,9 @@
       HandleMark hm;  // handle scope
       gclog_or_tty->print(" VerifyDuringGC:(after)");
       Universe::heap()->prepare_for_verify();
-      Universe::heap()->verify(/* allow_dirty */      true,
-                               /* silent */           false,
-                               /* use_prev_marking */ false);
+      Universe::verify(/* allow dirty */ true,
+                       /* silent      */ false,
+                       /* option      */ VerifyOption_G1UseNextMarking);
     }
     assert(!restart_for_overflow(), "sanity");
   }
@@ -1326,9 +1278,7 @@
       size_t end_index = index + 1;
       while (end_index < g1h->n_regions()) {
         HeapRegion* chr = g1h->region_at(end_index);
-        if (!chr->continuesHumongous()) {
-          break;
-        }
+        if (!chr->continuesHumongous()) break;
         end_index += 1;
       }
       _region_bm->par_at_put_range((BitMap::idx_t) index,
@@ -1337,8 +1287,9 @@
   }
 
   bool doHeapRegion(HeapRegion* hr) {
-    if (!_final && _regions_done == 0)
+    if (!_final && _regions_done == 0) {
       _start_vtime_sec = os::elapsedVTime();
+    }
 
     if (hr->continuesHumongous()) {
       // We will ignore these here and process them when their
@@ -1431,8 +1382,9 @@
       _changed = true;
     }
     // Handle the last range, if any.
-    if (start_card_num != -1)
+    if (start_card_num != -1) {
       mark_card_num_range(start_card_num, last_card_num);
+    }
     if (_final) {
       // Mark the allocated-since-marking portion...
       HeapWord* tp = hr->top();
@@ -1509,14 +1461,14 @@
   BitMap* _card_bm;
 public:
   G1ParFinalCountTask(G1CollectedHeap* g1h, CMBitMap* bm,
-                      BitMap* region_bm, BitMap* card_bm) :
-    AbstractGangTask("G1 final counting"), _g1h(g1h),
-    _bm(bm), _region_bm(region_bm), _card_bm(card_bm)
-  {
-    if (ParallelGCThreads > 0)
+                      BitMap* region_bm, BitMap* card_bm)
+    : AbstractGangTask("G1 final counting"), _g1h(g1h),
+      _bm(bm), _region_bm(region_bm), _card_bm(card_bm) {
+    if (ParallelGCThreads > 0) {
       _n_workers = _g1h->workers()->total_workers();
-    else
+    } else {
       _n_workers = 1;
+    }
     _live_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers);
     _used_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers);
   }
@@ -1628,6 +1580,23 @@
       _max_live_bytes += g1_note_end.max_live_bytes();
       _freed_bytes += g1_note_end.freed_bytes();
 
+      // If we iterate over the global cleanup list at the end of
+      // cleanup to do this printing we cannot guarantee that we only
+      // generate output for the newly-reclaimed regions (the list
+      // might not be empty at the beginning of cleanup; we might
+      // still be working on its previous contents). So we do the
+      // printing here, before we append the new regions to the global
+      // cleanup list.
+
+      G1HRPrinter* hr_printer = _g1h->hr_printer();
+      if (hr_printer->is_active()) {
+        HeapRegionLinkedListIterator iter(&local_cleanup_list);
+        while (iter.more_available()) {
+          HeapRegion* hr = iter.get_next();
+          hr_printer->cleanup(hr);
+        }
+      }
+
       _cleanup_list->add_as_tail(&local_cleanup_list);
       assert(local_cleanup_list.is_empty(), "post-condition");
 
@@ -1701,7 +1670,9 @@
                               true /* par */);
     double region_time = (os::elapsedTime() - start);
     _claimed_region_time += region_time;
-    if (region_time > _max_region_time) _max_region_time = region_time;
+    if (region_time > _max_region_time) {
+      _max_region_time = region_time;
+    }
   }
   return false;
 }
@@ -1724,9 +1695,9 @@
     HandleMark hm;  // handle scope
     gclog_or_tty->print(" VerifyDuringGC:(before)");
     Universe::heap()->prepare_for_verify();
-    Universe::verify(/* allow dirty  */ true,
-                     /* silent       */ false,
-                     /* prev marking */ true);
+    Universe::verify(/* allow dirty */ true,
+                     /* silent      */ false,
+                     /* option      */ VerifyOption_G1UsePrevMarking);
   }
 
   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
@@ -1758,18 +1729,21 @@
 
   size_t known_garbage_bytes =
     g1_par_count_task.used_bytes() - g1_par_count_task.live_bytes();
-#if 0
-  gclog_or_tty->print_cr("used %1.2lf, live %1.2lf, garbage %1.2lf",
-                         (double) g1_par_count_task.used_bytes() / (double) (1024 * 1024),
-                         (double) g1_par_count_task.live_bytes() / (double) (1024 * 1024),
-                         (double) known_garbage_bytes / (double) (1024 * 1024));
-#endif // 0
   g1p->set_known_garbage_bytes(known_garbage_bytes);
 
   size_t start_used_bytes = g1h->used();
   _at_least_one_mark_complete = true;
   g1h->set_marking_complete();
 
+  ergo_verbose4(ErgoConcCycles,
+           "finish cleanup",
+           ergo_format_byte("occupancy")
+           ergo_format_byte("capacity")
+           ergo_format_byte_perc("known garbage"),
+           start_used_bytes, g1h->capacity(),
+           known_garbage_bytes,
+           ((double) known_garbage_bytes / (double) g1h->capacity()) * 100.0);
+
   double count_end = os::elapsedTime();
   double this_final_counting_time = (count_end - start);
   if (G1PrintParCleanupStats) {
@@ -1864,6 +1838,10 @@
   size_t cleaned_up_bytes = start_used_bytes - g1h->used();
   g1p->decrease_known_garbage_bytes(cleaned_up_bytes);
 
+  // Clean up will have freed any regions completely full of garbage.
+  // Update the soft reference policy with the new heap occupancy.
+  Universe::update_heap_info_at_gc();
+
   // We need to make this be a "collection" so any collection pause that
   // races with it goes around and waits for completeCleanup to finish.
   g1h->increment_total_collections();
@@ -1872,9 +1850,9 @@
     HandleMark hm;  // handle scope
     gclog_or_tty->print(" VerifyDuringGC:(after)");
     Universe::heap()->prepare_for_verify();
-    Universe::verify(/* allow dirty  */ true,
-                     /* silent       */ false,
-                     /* prev marking */ true);
+    Universe::verify(/* allow dirty */ true,
+                     /* silent      */ false,
+                     /* option      */ VerifyOption_G1UsePrevMarking);
   }
 
   g1h->verify_region_sets_optional();
@@ -1960,10 +1938,11 @@
     oop obj = oopDesc::load_decode_heap_oop(p);
     HeapWord* addr = (HeapWord*)obj;
 
-    if (_cm->verbose_high())
+    if (_cm->verbose_high()) {
       gclog_or_tty->print_cr("\t[0] we're looking at location "
-                               "*"PTR_FORMAT" = "PTR_FORMAT,
-                               p, (void*) obj);
+                             "*"PTR_FORMAT" = "PTR_FORMAT,
+                             p, (void*) obj);
+    }
 
     if (_g1->is_in_g1_reserved(addr) && _g1->is_obj_ill(obj)) {
       _bitMap->mark(addr);
@@ -2025,10 +2004,11 @@
   template <class T> void do_oop_work(T* p) {
     if (!_cm->has_overflown()) {
       oop obj = oopDesc::load_decode_heap_oop(p);
-      if (_cm->verbose_high())
+      if (_cm->verbose_high()) {
         gclog_or_tty->print_cr("\t[%d] we're looking at location "
                                "*"PTR_FORMAT" = "PTR_FORMAT,
                                _task->task_id(), p, (void*) obj);
+      }
 
       _task->deal_with_reference(obj);
       _ref_counter--;
@@ -2055,8 +2035,9 @@
         _ref_counter = _ref_counter_limit;
       }
     } else {
-       if (_cm->verbose_high())
+      if (_cm->verbose_high()) {
          gclog_or_tty->print_cr("\t[%d] CM Overflow", _task->task_id());
+      }
     }
   }
 };
@@ -2071,8 +2052,10 @@
 
   void do_void() {
     do {
-      if (_cm->verbose_high())
-        gclog_or_tty->print_cr("\t[%d] Drain: Calling do marking_step", _task->task_id());
+      if (_cm->verbose_high()) {
+        gclog_or_tty->print_cr("\t[%d] Drain: Calling do marking_step",
+                               _task->task_id());
+      }
 
       // We call CMTask::do_marking_step() to completely drain the local and
       // global marking stacks. The routine is called in a loop, which we'll
@@ -2094,8 +2077,10 @@
   }
 };
 
-// Implementation of AbstractRefProcTaskExecutor for G1
-class G1RefProcTaskExecutor: public AbstractRefProcTaskExecutor {
+// Implementation of AbstractRefProcTaskExecutor for parallel
+// reference processing at the end of G1 concurrent marking
+
+class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 private:
   G1CollectedHeap* _g1h;
   ConcurrentMark*  _cm;
@@ -2104,7 +2089,7 @@
   int              _active_workers;
 
 public:
-  G1RefProcTaskExecutor(G1CollectedHeap* g1h,
+  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                         ConcurrentMark* cm,
                         CMBitMap* bitmap,
                         WorkGang* workers,
@@ -2118,7 +2103,7 @@
   virtual void execute(EnqueueTask& task);
 };
 
-class G1RefProcTaskProxy: public AbstractGangTask {
+class G1CMRefProcTaskProxy: public AbstractGangTask {
   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
   ProcessTask&     _proc_task;
   G1CollectedHeap* _g1h;
@@ -2126,7 +2111,7 @@
   CMBitMap*        _bitmap;
 
 public:
-  G1RefProcTaskProxy(ProcessTask& proc_task,
+  G1CMRefProcTaskProxy(ProcessTask& proc_task,
                      G1CollectedHeap* g1h,
                      ConcurrentMark* cm,
                      CMBitMap* bitmap) :
@@ -2144,10 +2129,10 @@
   }
 };
 
-void G1RefProcTaskExecutor::execute(ProcessTask& proc_task) {
+void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
   assert(_workers != NULL, "Need parallel worker threads.");
 
-  G1RefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm, _bitmap);
+  G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm, _bitmap);
 
   // We need to reset the phase for each task execution so that
   // the termination protocol of CMTask::do_marking_step works.
@@ -2157,12 +2142,12 @@
   _g1h->set_par_threads(0);
 }
 
-class G1RefEnqueueTaskProxy: public AbstractGangTask {
+class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
   EnqueueTask& _enq_task;
 
 public:
-  G1RefEnqueueTaskProxy(EnqueueTask& enq_task) :
+  G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
     AbstractGangTask("Enqueue reference objects in parallel"),
     _enq_task(enq_task)
   { }
@@ -2172,10 +2157,10 @@
   }
 };
 
-void G1RefProcTaskExecutor::execute(EnqueueTask& enq_task) {
+void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
   assert(_workers != NULL, "Need parallel worker threads.");
 
-  G1RefEnqueueTaskProxy enq_task_proxy(enq_task);
+  G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
 
   _g1h->set_par_threads(_active_workers);
   _workers->run_task(&enq_task_proxy);
@@ -2185,71 +2170,84 @@
 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
   ResourceMark rm;
   HandleMark   hm;
-  G1CollectedHeap* g1h   = G1CollectedHeap::heap();
-  ReferenceProcessor* rp = g1h->ref_processor();
-
-  // See the comment in G1CollectedHeap::ref_processing_init()
-  // about how reference processing currently works in G1.
-
-  // Process weak references.
-  rp->setup_policy(clear_all_soft_refs);
-  assert(_markStack.isEmpty(), "mark stack should be empty");
-
-  G1CMIsAliveClosure   g1_is_alive(g1h);
-  G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap());
-  G1CMDrainMarkingStackClosure
-    g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive);
-  // We use the work gang from the G1CollectedHeap and we utilize all
-  // the worker threads.
-  int active_workers = g1h->workers() ? g1h->workers()->total_workers() : 1;
-  active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1);
-
-  G1RefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(),
-                                          g1h->workers(), active_workers);
-
-
-  if (rp->processing_is_mt()) {
-    // Set the degree of MT here.  If the discovery is done MT, there
-    // may have been a different number of threads doing the discovery
-    // and a different number of discovered lists may have Ref objects.
-    // That is OK as long as the Reference lists are balanced (see
-    // balance_all_queues() and balance_queues()).
-    rp->set_active_mt_degree(active_workers);
-
-    rp->process_discovered_references(&g1_is_alive,
+
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+  // Is alive closure.
+  G1CMIsAliveClosure g1_is_alive(g1h);
+
+  // Inner scope to exclude the cleaning of the string and symbol
+  // tables from the displayed time.
+  {
+    bool verbose = PrintGC && PrintGCDetails;
+    if (verbose) {
+      gclog_or_tty->put(' ');
+    }
+    TraceTime t("GC ref-proc", verbose, false, gclog_or_tty);
+
+    ReferenceProcessor* rp = g1h->ref_processor_cm();
+
+    // See the comment in G1CollectedHeap::ref_processing_init()
+    // about how reference processing currently works in G1.
+
+    // Process weak references.
+    rp->setup_policy(clear_all_soft_refs);
+    assert(_markStack.isEmpty(), "mark stack should be empty");
+
+    G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap());
+    G1CMDrainMarkingStackClosure
+      g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive);
+
+    // We use the work gang from the G1CollectedHeap and we utilize all
+    // the worker threads.
+    int active_workers = g1h->workers() ? g1h->workers()->total_workers() : 1;
+    active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1);
+
+    G1CMRefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(),
+                                              g1h->workers(), active_workers);
+
+    if (rp->processing_is_mt()) {
+      // Set the degree of MT here.  If the discovery is done MT, there
+      // may have been a different number of threads doing the discovery
+      // and a different number of discovered lists may have Ref objects.
+      // That is OK as long as the Reference lists are balanced (see
+      // balance_all_queues() and balance_queues()).
+      rp->set_active_mt_degree(active_workers);
+
+      rp->process_discovered_references(&g1_is_alive,
                                       &g1_keep_alive,
                                       &g1_drain_mark_stack,
                                       &par_task_executor);
 
-    // The work routines of the parallel keep_alive and drain_marking_stack
-    // will set the has_overflown flag if we overflow the global marking
-    // stack.
-  } else {
-    rp->process_discovered_references(&g1_is_alive,
-                                      &g1_keep_alive,
-                                      &g1_drain_mark_stack,
-                                      NULL);
-
+      // The work routines of the parallel keep_alive and drain_marking_stack
+      // will set the has_overflown flag if we overflow the global marking
+      // stack.
+    } else {
+      rp->process_discovered_references(&g1_is_alive,
+                                        &g1_keep_alive,
+                                        &g1_drain_mark_stack,
+                                        NULL);
+    }
+
+    assert(_markStack.overflow() || _markStack.isEmpty(),
+            "mark stack should be empty (unless it overflowed)");
+    if (_markStack.overflow()) {
+      // Should have been done already when we tried to push an
+      // entry on to the global mark stack. But let's do it again.
+      set_has_overflown();
+    }
+
+    if (rp->processing_is_mt()) {
+      assert(rp->num_q() == active_workers, "why not");
+      rp->enqueue_discovered_references(&par_task_executor);
+    } else {
+      rp->enqueue_discovered_references();
+    }
+
+    rp->verify_no_references_recorded();
+    assert(!rp->discovery_enabled(), "Post condition");
   }
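
The inner scope that just closed exists only so that the "GC ref-proc" TraceTime stops before the string and symbol table cleanup below. A hedged sketch of that RAII-scoped timing pattern, with a hypothetical ScopedTimer standing in for TraceTime:

// Illustrative sketch only -- ScopedTimer is a made-up stand-in for
// HotSpot's TraceTime. The timer's destructor runs when the inner block
// ends, so the later cleanup work is excluded from the reported time.
#include <chrono>
#include <cstdio>

struct ScopedTimer {
  const char* name;
  std::chrono::steady_clock::time_point start;
  explicit ScopedTimer(const char* n)
    : name(n), start(std::chrono::steady_clock::now()) {}
  ~ScopedTimer() {
    double ms = std::chrono::duration<double, std::milli>(
                    std::chrono::steady_clock::now() - start).count();
    std::printf("%s %.3fms\n", name, ms);
  }
};

void weak_refs_work_sketch() {
  {
    ScopedTimer t("GC ref-proc");      // only this block is charged to ref-proc
    // ... process and enqueue the discovered references ...
  }
  // ... unlink StringTable / SymbolTable entries: not charged to ref-proc ...
}

int main() {
  weak_refs_work_sketch();
  return 0;
}
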
 
-  assert(_markStack.overflow() || _markStack.isEmpty(),
-      "mark stack should be empty (unless it overflowed)");
-  if (_markStack.overflow()) {
-    // Should have been done already when we tried to push an
-    // entry on to the global mark stack. But let's do it again.
-    set_has_overflown();
-  }
-
-  if (rp->processing_is_mt()) {
-    assert(rp->num_q() == active_workers, "why not");
-    rp->enqueue_discovered_references(&par_task_executor);
-  } else {
-    rp->enqueue_discovered_references();
-  }
-
-  rp->verify_no_references_recorded();
-  assert(!rp->discovery_enabled(), "should have been disabled");
-
   // Now clean up stale oops in StringTable
   StringTable::unlink(&g1_is_alive);
   // Clean up unreferenced symbols in symbol table.
@@ -2343,18 +2341,16 @@
 class PrintReachableOopClosure: public OopClosure {
 private:
   G1CollectedHeap* _g1h;
-  CMBitMapRO*      _bitmap;
   outputStream*    _out;
-  bool             _use_prev_marking;
+  VerifyOption     _vo;
   bool             _all;
 
 public:
-  PrintReachableOopClosure(CMBitMapRO*   bitmap,
-                           outputStream* out,
-                           bool          use_prev_marking,
+  PrintReachableOopClosure(outputStream* out,
+                           VerifyOption  vo,
                            bool          all) :
     _g1h(G1CollectedHeap::heap()),
-    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking), _all(all) { }
+    _out(out), _vo(vo), _all(all) { }
 
   void do_oop(narrowOop* p) { do_oop_work(p); }
   void do_oop(      oop* p) { do_oop_work(p); }
@@ -2372,12 +2368,23 @@
       HeapRegion* hr  = _g1h->heap_region_containing(obj);
       guarantee(hr != NULL, "invariant");
       bool over_tams = false;
-      if (_use_prev_marking) {
-        over_tams = hr->obj_allocated_since_prev_marking(obj);
-      } else {
-        over_tams = hr->obj_allocated_since_next_marking(obj);
+      bool marked = false;
+
+      switch (_vo) {
+        case VerifyOption_G1UsePrevMarking:
+          over_tams = hr->obj_allocated_since_prev_marking(obj);
+          marked = _g1h->isMarkedPrev(obj);
+          break;
+        case VerifyOption_G1UseNextMarking:
+          over_tams = hr->obj_allocated_since_next_marking(obj);
+          marked = _g1h->isMarkedNext(obj);
+          break;
+        case VerifyOption_G1UseMarkWord:
+          marked = obj->is_gc_marked();
+          break;
+        default:
+          ShouldNotReachHere();
       }
-      bool marked = _bitmap->isMarked((HeapWord*) obj);
 
       if (over_tams) {
         str = " >";
@@ -2398,35 +2405,45 @@
 
 class PrintReachableObjectClosure : public ObjectClosure {
 private:
-  CMBitMapRO*   _bitmap;
-  outputStream* _out;
-  bool          _use_prev_marking;
-  bool          _all;
-  HeapRegion*   _hr;
+  G1CollectedHeap* _g1h;
+  outputStream*    _out;
+  VerifyOption     _vo;
+  bool             _all;
+  HeapRegion*      _hr;
 
 public:
-  PrintReachableObjectClosure(CMBitMapRO*   bitmap,
-                              outputStream* out,
-                              bool          use_prev_marking,
+  PrintReachableObjectClosure(outputStream* out,
+                              VerifyOption  vo,
                               bool          all,
                               HeapRegion*   hr) :
-    _bitmap(bitmap), _out(out),
-    _use_prev_marking(use_prev_marking), _all(all), _hr(hr) { }
+    _g1h(G1CollectedHeap::heap()),
+    _out(out), _vo(vo), _all(all), _hr(hr) { }
 
   void do_object(oop o) {
-    bool over_tams;
-    if (_use_prev_marking) {
-      over_tams = _hr->obj_allocated_since_prev_marking(o);
-    } else {
-      over_tams = _hr->obj_allocated_since_next_marking(o);
+    bool over_tams = false;
+    bool marked = false;
+
+    switch (_vo) {
+      case VerifyOption_G1UsePrevMarking:
+        over_tams = _hr->obj_allocated_since_prev_marking(o);
+        marked = _g1h->isMarkedPrev(o);
+        break;
+      case VerifyOption_G1UseNextMarking:
+        over_tams = _hr->obj_allocated_since_next_marking(o);
+        marked = _g1h->isMarkedNext(o);
+        break;
+      case VerifyOption_G1UseMarkWord:
+        marked = o->is_gc_marked();
+        break;
+      default:
+        ShouldNotReachHere();
     }
-    bool marked = _bitmap->isMarked((HeapWord*) o);
     bool print_it = _all || over_tams || marked;
 
     if (print_it) {
       _out->print_cr(" "PTR_FORMAT"%s",
                      o, (over_tams) ? " >" : (marked) ? " M" : "");
-      PrintReachableOopClosure oopCl(_bitmap, _out, _use_prev_marking, _all);
+      PrintReachableOopClosure oopCl(_out, _vo, _all);
       o->oop_iterate(&oopCl);
     }
   }
@@ -2434,9 +2451,8 @@
 
 class PrintReachableRegionClosure : public HeapRegionClosure {
 private:
-  CMBitMapRO*   _bitmap;
   outputStream* _out;
-  bool          _use_prev_marking;
+  VerifyOption  _vo;
   bool          _all;
 
 public:
@@ -2445,10 +2461,21 @@
     HeapWord* e = hr->end();
     HeapWord* t = hr->top();
     HeapWord* p = NULL;
-    if (_use_prev_marking) {
-      p = hr->prev_top_at_mark_start();
-    } else {
-      p = hr->next_top_at_mark_start();
+
+    switch (_vo) {
+      case VerifyOption_G1UsePrevMarking:
+        p = hr->prev_top_at_mark_start();
+        break;
+      case VerifyOption_G1UseNextMarking:
+        p = hr->next_top_at_mark_start();
+        break;
+      case VerifyOption_G1UseMarkWord:
+        // When we are verifying marking using the mark word
+        // TAMS has no relevance.
+        assert(p == NULL, "post-condition");
+        break;
+      default:
+        ShouldNotReachHere();
     }
     _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
                    "TAMS: "PTR_FORMAT, b, e, t, p);
@@ -2460,8 +2487,7 @@
     if (to > from) {
       _out->print_cr("Objects in ["PTR_FORMAT", "PTR_FORMAT"]", from, to);
       _out->cr();
-      PrintReachableObjectClosure ocl(_bitmap, _out,
-                                      _use_prev_marking, _all, hr);
+      PrintReachableObjectClosure ocl(_out, _vo, _all, hr);
       hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
       _out->cr();
     }
@@ -2469,15 +2495,25 @@
     return false;
   }
 
-  PrintReachableRegionClosure(CMBitMapRO*   bitmap,
-                              outputStream* out,
-                              bool          use_prev_marking,
+  PrintReachableRegionClosure(outputStream* out,
+                              VerifyOption  vo,
                               bool          all) :
-    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking), _all(all) { }
+    _out(out), _vo(vo), _all(all) { }
 };
 
+static const char* verify_option_to_tams(VerifyOption vo) {
+  switch (vo) {
+    case VerifyOption_G1UsePrevMarking:
+      return "PTAMS";
+    case VerifyOption_G1UseNextMarking:
+      return "NTAMS";
+    default:
+      return "NONE";
+  }
+}
+
 void ConcurrentMark::print_reachable(const char* str,
-                                     bool use_prev_marking,
+                                     VerifyOption vo,
                                      bool all) {
   gclog_or_tty->cr();
   gclog_or_tty->print_cr("== Doing heap dump... ");
@@ -2504,20 +2540,12 @@
   }
 
   outputStream* out = &fout;
-
-  CMBitMapRO* bitmap = NULL;
-  if (use_prev_marking) {
-    bitmap = _prevMarkBitMap;
-  } else {
-    bitmap = _nextMarkBitMap;
-  }
-
-  out->print_cr("-- USING %s", (use_prev_marking) ? "PTAMS" : "NTAMS");
+  out->print_cr("-- USING %s", verify_option_to_tams(vo));
   out->cr();
 
   out->print_cr("--- ITERATING OVER REGIONS");
   out->cr();
-  PrintReachableRegionClosure rcl(bitmap, out, use_prev_marking, all);
+  PrintReachableRegionClosure rcl(out, vo, all);
   _g1h->heap_region_iterate(&rcl);
   out->cr();
 
@@ -2546,34 +2574,42 @@
 };
 
 void ConcurrentMark::deal_with_reference(oop obj) {
-  if (verbose_high())
+  if (verbose_high()) {
     gclog_or_tty->print_cr("[global] we're dealing with reference "PTR_FORMAT,
                            (void*) obj);
-
+  }
 
   HeapWord* objAddr = (HeapWord*) obj;
   assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
   if (_g1h->is_in_g1_reserved(objAddr)) {
-    assert(obj != NULL, "is_in_g1_reserved should ensure this");
-    HeapRegion* hr = _g1h->heap_region_containing(obj);
-    if (_g1h->is_obj_ill(obj, hr)) {
-      if (verbose_high())
-        gclog_or_tty->print_cr("[global] "PTR_FORMAT" is not considered "
-                               "marked", (void*) obj);
-
-      // we need to mark it first
-      if (_nextMarkBitMap->parMark(objAddr)) {
-        // No OrderAccess:store_load() is needed. It is implicit in the
-        // CAS done in parMark(objAddr) above
-        HeapWord* finger = _finger;
-        if (objAddr < finger) {
-          if (verbose_high())
-            gclog_or_tty->print_cr("[global] below the global finger "
-                                   "("PTR_FORMAT"), pushing it", finger);
-          if (!mark_stack_push(obj)) {
-            if (verbose_low())
-              gclog_or_tty->print_cr("[global] global stack overflow during "
-                                     "deal_with_reference");
+    assert(obj != NULL, "null check is implicit");
+    if (!_nextMarkBitMap->isMarked(objAddr)) {
+      // Only get the containing region if the object is not marked on the
+      // bitmap (otherwise, it's a waste of time since we won't do
+      // anything with it).
+      HeapRegion* hr = _g1h->heap_region_containing_raw(obj);
+      if (!hr->obj_allocated_since_next_marking(obj)) {
+        if (verbose_high()) {
+          gclog_or_tty->print_cr("[global] "PTR_FORMAT" is not considered "
+                                 "marked", (void*) obj);
+        }
+
+        // we need to mark it first
+        if (_nextMarkBitMap->parMark(objAddr)) {
+          // No OrderAccess:store_load() is needed. It is implicit in the
+          // CAS done in parMark(objAddr) above
+          HeapWord* finger = _finger;
+          if (objAddr < finger) {
+            if (verbose_high()) {
+              gclog_or_tty->print_cr("[global] below the global finger "
+                                     "("PTR_FORMAT"), pushing it", finger);
+            }
+            if (!mark_stack_push(obj)) {
+              if (verbose_low()) {
+                gclog_or_tty->print_cr("[global] global stack overflow during "
+                                       "deal_with_reference");
+              }
+            }
           }
         }
       }
@@ -2587,8 +2623,9 @@
   satb_mq_set.set_closure(&oc);
 
   while (satb_mq_set.apply_closure_to_completed_buffer()) {
-    if (verbose_medium())
+    if (verbose_medium()) {
       gclog_or_tty->print_cr("[global] processed an SATB buffer");
+    }
   }
 
   // no need to check whether we should do this, as this is only
@@ -2631,21 +2668,43 @@
   while (finger < _heap_end) {
     assert(_g1h->is_in_g1_reserved(finger), "invariant");
 
-    // is the gap between reading the finger and doing the CAS too long?
-
-    HeapRegion* curr_region   = _g1h->heap_region_containing(finger);
+    // Note on how this code handles humongous regions. In the
+    // normal case the finger will reach the start of a "starts
+    // humongous" (SH) region. Its end will either be the end of the
+    // last "continues humongous" (CH) region in the sequence, or the
+    // standard end of the SH region (if the SH is the only region in
+    // the sequence). That way claim_region() will skip over the CH
+    // regions. However, there is a subtle race between a CM thread
+    // executing this method and a mutator thread doing a humongous
+    // object allocation. The two are not mutually exclusive as the CM
+    // thread does not need to hold the Heap_lock when it gets
+    // here. So there is a chance that claim_region() will come across
+    // a free region that's in the progress of becoming a SH or a CH
+    // region. In the former case, it will either
+    //   a) Miss the update to the region's end, in which case it will
+    //      visit every subsequent CH region, will find their bitmaps
+    //      empty, and do nothing, or
+    //   b) Observe the update of the region's end (in which case
+    //      it will skip the subsequent CH regions).
+    // If it comes across a region that suddenly becomes CH, the
+    // scenario will be similar to b). So, the race between
+    // claim_region() and a humongous object allocation might force us
+    // to do a bit of unnecessary work (due to some unnecessary bitmap
+    // iterations) but it should not introduce any correctness issues.
+    HeapRegion* curr_region   = _g1h->heap_region_containing_raw(finger);
     HeapWord*   bottom        = curr_region->bottom();
     HeapWord*   end           = curr_region->end();
     HeapWord*   limit         = curr_region->next_top_at_mark_start();
 
-    if (verbose_low())
+    if (verbose_low()) {
       gclog_or_tty->print_cr("[%d] curr_region = "PTR_FORMAT" "
                              "["PTR_FORMAT", "PTR_FORMAT"), "
                              "limit = "PTR_FORMAT,
                              task_num, curr_region, bottom, end, limit);
-
-    HeapWord* res =
-      (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
+    }
+
+    // Is the gap between reading the finger and doing the CAS too long?
+    HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
     if (res == finger) {
       // we succeeded
 
@@ -2653,32 +2712,36 @@
       // someone else might have moved the finger even further
       assert(_finger >= end, "the finger should have moved forward");
 
-      if (verbose_low())
+      if (verbose_low()) {
         gclog_or_tty->print_cr("[%d] we were successful with region = "
                                PTR_FORMAT, task_num, curr_region);
+      }
 
       if (limit > bottom) {
-        if (verbose_low())
+        if (verbose_low()) {
           gclog_or_tty->print_cr("[%d] region "PTR_FORMAT" is not empty, "
                                  "returning it ", task_num, curr_region);
+        }
         return curr_region;
       } else {
         assert(limit == bottom,
                "the region limit should be at bottom");
-        if (verbose_low())
+        if (verbose_low()) {
           gclog_or_tty->print_cr("[%d] region "PTR_FORMAT" is empty, "
                                  "returning NULL", task_num, curr_region);
+        }
         // we return NULL and the caller should try calling
         // claim_region() again.
         return NULL;
       }
     } else {
       assert(_finger > finger, "the finger should have moved forward");
-      if (verbose_low())
+      if (verbose_low()) {
         gclog_or_tty->print_cr("[%d] somebody else moved the finger, "
                                "global finger = "PTR_FORMAT", "
                                "our finger = "PTR_FORMAT,
                                task_num, _finger, finger);
+      }
 
       // read it again
       finger = _finger;
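
The hunk above claims regions by CASing the global finger forward, as described in the humongous-region note. A minimal sketch of that claim-by-CAS idiom, using std::atomic and toy region bookkeeping (Region, g_finger and the fixed region size are illustrative assumptions, not HotSpot code):

// Illustrative sketch only. Read the finger, find the region it points to,
// then try to bump the finger to that region's end with a single CAS. A
// failed CAS means another thread claimed first; the caller simply re-reads
// the finger and retries, exactly as the loop above does.
#include <atomic>
#include <cstdio>

struct Region {
  int bottom;
  int end;
};

static Region regions[] = { {0, 10}, {10, 20}, {20, 30} };
static std::atomic<int> g_finger{0};

// Returns the claimed region, or nullptr if another thread moved the finger.
Region* claim_region() {
  int finger = g_finger.load();
  if (finger >= 30) return nullptr;              // past the end of the toy heap
  Region* r = &regions[finger / 10];             // region containing the finger
  int expected = finger;
  if (g_finger.compare_exchange_strong(expected, r->end)) {
    return r;                                    // we own [bottom, end)
  }
  return nullptr;
}

int main() {
  while (Region* r = claim_region()) {
    std::printf("claimed [%d, %d)\n", r->bottom, r->end);
  }
  return 0;
}
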
@@ -2722,18 +2785,20 @@
 }
 
 void ConcurrentMark::oops_do(OopClosure* cl) {
-  if (_markStack.size() > 0 && verbose_low())
+  if (_markStack.size() > 0 && verbose_low()) {
     gclog_or_tty->print_cr("[global] scanning the global marking stack, "
                            "size = %d", _markStack.size());
+  }
   // we first iterate over the contents of the mark stack...
   _markStack.oops_do(cl);
 
   for (int i = 0; i < (int)_max_task_num; ++i) {
     OopTaskQueue* queue = _task_queues->queue((int)i);
 
-    if (queue->size() > 0 && verbose_low())
+    if (queue->size() > 0 && verbose_low()) {
       gclog_or_tty->print_cr("[global] scanning task queue of task %d, "
                              "size = %d", i, queue->size());
+    }
 
     // ...then over the contents of the all the task queues.
     queue->oops_do(cl);
@@ -2805,14 +2870,17 @@
       return false;
     }
     _ms[_ms_ind] = obj;
-    if (obj->is_objArray()) _array_ind_stack[_ms_ind] = arr_ind;
+    if (obj->is_objArray()) {
+      _array_ind_stack[_ms_ind] = arr_ind;
+    }
     _ms_ind++;
     return true;
   }
 
   oop pop() {
-    if (_ms_ind == 0) return NULL;
-    else {
+    if (_ms_ind == 0) {
+      return NULL;
+    } else {
       _ms_ind--;
       return _ms[_ms_ind];
     }
@@ -3011,17 +3079,19 @@
 // newCSet().
 
 void ConcurrentMark::newCSet() {
-  if (!concurrent_marking_in_progress())
+  if (!concurrent_marking_in_progress()) {
     // nothing to do if marking is not in progress
     return;
+  }
 
   // find what the lowest finger is among the global and local fingers
   _min_finger = _finger;
   for (int i = 0; i < (int)_max_task_num; ++i) {
     CMTask* task = _tasks[i];
     HeapWord* task_finger = task->finger();
-    if (task_finger != NULL && task_finger < _min_finger)
+    if (task_finger != NULL && task_finger < _min_finger) {
       _min_finger = task_finger;
+    }
   }
 
   _should_gray_objects = false;
@@ -3041,17 +3111,18 @@
   // irrespective whether all collection set regions are below the
   // finger, if the region stack is not empty. This is expected to be
   // a rare case, so I don't think it's necessary to be smarter about it.
-  if (!region_stack_empty() || has_aborted_regions())
+  if (!region_stack_empty() || has_aborted_regions()) {
     _should_gray_objects = true;
+  }
 }
 
 void ConcurrentMark::registerCSetRegion(HeapRegion* hr) {
-  if (!concurrent_marking_in_progress())
-    return;
+  if (!concurrent_marking_in_progress()) return;
 
   HeapWord* region_end = hr->end();
-  if (region_end > _min_finger)
+  if (region_end > _min_finger) {
     _should_gray_objects = true;
+  }
 }
 
 // Resets the region fields of active CMTasks whose values point
@@ -3152,11 +3223,13 @@
 // We take a break if someone is trying to stop the world.
 bool ConcurrentMark::do_yield_check(int worker_i) {
   if (should_yield()) {
-    if (worker_i == 0)
+    if (worker_i == 0) {
       _g1h->g1_policy()->record_concurrent_pause();
+    }
     cmThread()->yield();
-    if (worker_i == 0)
+    if (worker_i == 0) {
       _g1h->g1_policy()->record_concurrent_pause_end();
+    }
     return true;
   } else {
     return false;
@@ -3174,9 +3247,8 @@
 
 bool ConcurrentMark::containing_cards_are_marked(void* start,
                                                  void* last) {
-  return
-    containing_card_is_marked(start) &&
-    containing_card_is_marked(last);
+  return containing_card_is_marked(start) &&
+         containing_card_is_marked(last);
 }
 
 #ifndef PRODUCT
@@ -3191,6 +3263,22 @@
 }
 #endif
 
+void CMTask::scan_object(oop obj) {
+  assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
+
+  if (_cm->verbose_high()) {
+    gclog_or_tty->print_cr("[%d] we're scanning object "PTR_FORMAT,
+                           _task_id, (void*) obj);
+  }
+
+  size_t obj_size = obj->size();
+  _words_scanned += obj_size;
+
+  obj->oop_iterate(_cm_oop_closure);
+  statsOnly( ++_objs_scanned );
+  check_limits();
+}
+
 // Closure for iteration over bitmaps
 class CMBitMapClosure : public BitMapClosure {
 private:
@@ -3254,43 +3342,17 @@
   CMObjectClosure(CMTask* task) : _task(task) { }
 };
 
-// Closure for iterating over object fields
-class CMOopClosure : public OopClosure {
-private:
-  G1CollectedHeap*   _g1h;
-  ConcurrentMark*    _cm;
-  CMTask*            _task;
-
-public:
-  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-  virtual void do_oop(      oop* p) { do_oop_work(p); }
-
-  template <class T> void do_oop_work(T* p) {
-    assert( _g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
-    assert(!_g1h->is_on_master_free_list(
-                    _g1h->heap_region_containing((HeapWord*) p)), "invariant");
-
-    oop obj = oopDesc::load_decode_heap_oop(p);
-    if (_cm->verbose_high())
-      gclog_or_tty->print_cr("[%d] we're looking at location "
-                             "*"PTR_FORMAT" = "PTR_FORMAT,
-                             _task->task_id(), p, (void*) obj);
-    _task->deal_with_reference(obj);
+G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
+                               ConcurrentMark* cm,
+                               CMTask* task)
+  : _g1h(g1h), _cm(cm), _task(task) {
+  assert(_ref_processor == NULL, "should be initialized to NULL");
+
+  if (G1UseConcMarkReferenceProcessing) {
+    _ref_processor = g1h->ref_processor_cm();
+    assert(_ref_processor != NULL, "should not be NULL");
   }
-
-  CMOopClosure(G1CollectedHeap* g1h,
-               ConcurrentMark* cm,
-               CMTask* task)
-    : _g1h(g1h), _cm(cm), _task(task)
-  {
-    assert(_ref_processor == NULL, "should be initialized to NULL");
-
-    if (G1UseConcMarkReferenceProcessing) {
-      _ref_processor = g1h->ref_processor();
-      assert(_ref_processor != NULL, "should not be NULL");
-    }
-  }
-};
+}
 
 void CMTask::setup_for_region(HeapRegion* hr) {
   // Separated the asserts so that we know which one fires.
@@ -3299,9 +3361,10 @@
   assert(!hr->continuesHumongous(),
         "claim_region() should have filtered out continues humongous regions");
 
-  if (_cm->verbose_low())
+  if (_cm->verbose_low()) {
     gclog_or_tty->print_cr("[%d] setting up for region "PTR_FORMAT,
                            _task_id, hr);
+  }
 
   _curr_region  = hr;
   _finger       = hr->bottom();
@@ -3314,10 +3377,11 @@
   HeapWord* limit           = hr->next_top_at_mark_start();
 
   if (limit == bottom) {
-    if (_cm->verbose_low())
+    if (_cm->verbose_low()) {
       gclog_or_tty->print_cr("[%d] found an empty region "
                              "["PTR_FORMAT", "PTR_FORMAT")",
                              _task_id, bottom, limit);
+    }
     // The region was collected underneath our feet.
     // We set the finger to bottom to ensure that the bitmap
     // iteration that will follow this will not do anything.
@@ -3346,9 +3410,10 @@
 
 void CMTask::giveup_current_region() {
   assert(_curr_region != NULL, "invariant");
-  if (_cm->verbose_low())
+  if (_cm->verbose_low()) {
     gclog_or_tty->print_cr("[%d] giving up region "PTR_FORMAT,
                            _task_id, _curr_region);
+  }
   clear_region_fields();
 }
 
@@ -3362,11 +3427,21 @@
   _region_finger = NULL;
 }
 
+void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
+  if (cm_oop_closure == NULL) {
+    assert(_cm_oop_closure != NULL, "invariant");
+  } else {
+    assert(_cm_oop_closure == NULL, "invariant");
+  }
+  _cm_oop_closure = cm_oop_closure;
+}
+
 void CMTask::reset(CMBitMap* nextMarkBitMap) {
   guarantee(nextMarkBitMap != NULL, "invariant");
 
-  if (_cm->verbose_low())
+  if (_cm->verbose_low()) {
     gclog_or_tty->print_cr("[%d] resetting", _task_id);
+  }
 
   _nextMarkBitMap                = nextMarkBitMap;
   clear_region_fields();
@@ -3411,118 +3486,6 @@
   return !_cm->mark_stack_empty() || has_aborted();
 }
 
-// This determines whether the method below will check both the local
-// and global fingers when determining whether to push on the stack a
-// gray object (value 1) or whether it will only check the global one
-// (value 0). The tradeoffs are that the former will be a bit more
-// accurate and possibly push less on the stack, but it might also be
-// a little bit slower.
-
-#define _CHECK_BOTH_FINGERS_      1
-
-void CMTask::deal_with_reference(oop obj) {
-  if (_cm->verbose_high())
-    gclog_or_tty->print_cr("[%d] we're dealing with reference = "PTR_FORMAT,
-                           _task_id, (void*) obj);
-
-  ++_refs_reached;
-
-  HeapWord* objAddr = (HeapWord*) obj;
-  assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
-  if (_g1h->is_in_g1_reserved(objAddr)) {
-    assert(obj != NULL, "is_in_g1_reserved should ensure this");
-    HeapRegion* hr =  _g1h->heap_region_containing(obj);
-    if (_g1h->is_obj_ill(obj, hr)) {
-      if (_cm->verbose_high())
-        gclog_or_tty->print_cr("[%d] "PTR_FORMAT" is not considered marked",
-                               _task_id, (void*) obj);
-
-      // we need to mark it first
-      if (_nextMarkBitMap->parMark(objAddr)) {
-        // No OrderAccess:store_load() is needed. It is implicit in the
-        // CAS done in parMark(objAddr) above
-        HeapWord* global_finger = _cm->finger();
-
-#if _CHECK_BOTH_FINGERS_
-        // we will check both the local and global fingers
-
-        if (_finger != NULL && objAddr < _finger) {
-          if (_cm->verbose_high())
-            gclog_or_tty->print_cr("[%d] below the local finger ("PTR_FORMAT"), "
-                                   "pushing it", _task_id, _finger);
-          push(obj);
-        } else if (_curr_region != NULL && objAddr < _region_limit) {
-          // do nothing
-        } else if (objAddr < global_finger) {
-          // Notice that the global finger might be moving forward
-          // concurrently. This is not a problem. In the worst case, we
-          // mark the object while it is above the global finger and, by
-          // the time we read the global finger, it has moved forward
-          // passed this object. In this case, the object will probably
-          // be visited when a task is scanning the region and will also
-          // be pushed on the stack. So, some duplicate work, but no
-          // correctness problems.
-
-          if (_cm->verbose_high())
-            gclog_or_tty->print_cr("[%d] below the global finger "
-                                   "("PTR_FORMAT"), pushing it",
-                                   _task_id, global_finger);
-          push(obj);
-        } else {
-          // do nothing
-        }
-#else // _CHECK_BOTH_FINGERS_
-        // we will only check the global finger
-
-        if (objAddr < global_finger) {
-          // see long comment above
-
-          if (_cm->verbose_high())
-            gclog_or_tty->print_cr("[%d] below the global finger "
-                                   "("PTR_FORMAT"), pushing it",
-                                   _task_id, global_finger);
-          push(obj);
-        }
-#endif // _CHECK_BOTH_FINGERS_
-      }
-    }
-  }
-}
-
-void CMTask::push(oop obj) {
-  HeapWord* objAddr = (HeapWord*) obj;
-  assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
-  assert(!_g1h->is_on_master_free_list(
-              _g1h->heap_region_containing((HeapWord*) objAddr)), "invariant");
-  assert(!_g1h->is_obj_ill(obj), "invariant");
-  assert(_nextMarkBitMap->isMarked(objAddr), "invariant");
-
-  if (_cm->verbose_high())
-    gclog_or_tty->print_cr("[%d] pushing "PTR_FORMAT, _task_id, (void*) obj);
-
-  if (!_task_queue->push(obj)) {
-    // The local task queue looks full. We need to push some entries
-    // to the global stack.
-
-    if (_cm->verbose_medium())
-      gclog_or_tty->print_cr("[%d] task queue overflow, "
-                             "moving entries to the global stack",
-                             _task_id);
-    move_entries_to_global_stack();
-
-    // this should succeed since, even if we overflow the global
-    // stack, we should have definitely removed some entries from the
-    // local queue. So, there must be space on it.
-    bool success = _task_queue->push(obj);
-    assert(success, "invariant");
-  }
-
-  statsOnly( int tmp_size = _task_queue->size();
-             if (tmp_size > _local_max_size)
-               _local_max_size = tmp_size;
-             ++_local_pushes );
-}
-
 void CMTask::reached_limit() {
   assert(_words_scanned >= _words_scanned_limit ||
          _refs_reached >= _refs_reached_limit ,
@@ -3531,8 +3494,7 @@
 }
 
 void CMTask::regular_clock_call() {
-  if (has_aborted())
-    return;
+  if (has_aborted()) return;
 
   // First, we need to recalculate the words scanned and refs reached
   // limits for the next clock call.
@@ -3549,8 +3511,7 @@
   // If we are not concurrent (i.e. we're doing remark) we don't need
   // to check anything else. The other steps are only needed during
   // the concurrent marking phase.
-  if (!concurrent())
-    return;
+  if (!concurrent()) return;
 
   // (2) If marking has been aborted for Full GC, then we also abort.
   if (_cm->has_aborted()) {
@@ -3563,23 +3524,25 @@
 
   // (3) If marking stats are enabled, then we update the step history.
 #if _MARKING_STATS_
-  if (_words_scanned >= _words_scanned_limit)
+  if (_words_scanned >= _words_scanned_limit) {
     ++_clock_due_to_scanning;
-  if (_refs_reached >= _refs_reached_limit)
+  }
+  if (_refs_reached >= _refs_reached_limit) {
     ++_clock_due_to_marking;
+  }
 
   double last_interval_ms = curr_time_ms - _interval_start_time_ms;
   _interval_start_time_ms = curr_time_ms;
   _all_clock_intervals_ms.add(last_interval_ms);
 
   if (_cm->verbose_medium()) {
-    gclog_or_tty->print_cr("[%d] regular clock, interval = %1.2lfms, "
-                           "scanned = %d%s, refs reached = %d%s",
-                           _task_id, last_interval_ms,
-                           _words_scanned,
-                           (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
-                           _refs_reached,
-                           (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
+      gclog_or_tty->print_cr("[%d] regular clock, interval = %1.2lfms, "
+                        "scanned = %d%s, refs reached = %d%s",
+                        _task_id, last_interval_ms,
+                        _words_scanned,
+                        (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
+                        _refs_reached,
+                        (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
   }
 #endif // _MARKING_STATS_
 
@@ -3606,9 +3569,10 @@
   // buffers available for processing. If there are, we abort.
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
-    if (_cm->verbose_low())
+    if (_cm->verbose_low()) {
       gclog_or_tty->print_cr("[%d] aborting to deal with pending SATB buffers",
                              _task_id);
+    }
     // we do need to process SATB buffers, we'll abort and restart
     // the marking task to do so
     set_has_aborted();
@@ -3631,8 +3595,9 @@
   // entries to/from the global stack). It basically tries to decrease the
   // scanning limit so that the clock is called earlier.
 
-  if (_cm->verbose_medium())
+  if (_cm->verbose_medium()) {
     gclog_or_tty->print_cr("[%d] decreasing limits", _task_id);
+  }
 
   _words_scanned_limit = _real_words_scanned_limit -
     3 * words_scanned_period / 4;
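
The limit adjustment above is part of CMTask's work-based clock: _words_scanned and _refs_reached are counted against limits, and crossing a limit triggers regular_clock_call(); lowering the limits after an expensive operation makes the next clock call come sooner. A minimal sketch of that counter-and-limit idiom, with hypothetical names (TaskClock, kWordsPeriod) rather than the real CMTask fields:

// Illustrative sketch only -- not the CMTask implementation.
#include <cstdio>

static const long kWordsPeriod = 100;    // work allowed between clock calls

struct TaskClock {
  long words_scanned = 0;
  long words_limit   = kWordsPeriod;

  void regular_clock_call() {
    // The real code checks time targets, aborts and pending SATB buffers
    // here; this sketch just schedules the next check.
    std::printf("clock at %ld words\n", words_scanned);
    words_limit = words_scanned + kWordsPeriod;
  }

  void scanned(long words) {             // called from the scanning loop
    words_scanned += words;
    if (words_scanned >= words_limit) {
      regular_clock_call();
    }
  }

  void decrease_limits() {               // after an expensive side operation
    words_limit = words_scanned + kWordsPeriod / 4;
  }
};

int main() {
  TaskClock clock;
  for (int i = 0; i < 25; ++i) {
    clock.scanned(10);                   // clock fires roughly every 10 calls
  }
  clock.decrease_limits();               // the next clock call comes sooner
  clock.scanned(30);
  return 0;
}
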
@@ -3658,18 +3623,22 @@
     statsOnly( ++_global_transfers_to; _local_pops += n );
 
     if (!_cm->mark_stack_push(buffer, n)) {
-      if (_cm->verbose_low())
-        gclog_or_tty->print_cr("[%d] aborting due to global stack overflow", _task_id);
+      if (_cm->verbose_low()) {
+        gclog_or_tty->print_cr("[%d] aborting due to global stack overflow",
+                               _task_id);
+      }
       set_has_aborted();
     } else {
       // the transfer was successful
 
-      if (_cm->verbose_medium())
+      if (_cm->verbose_medium()) {
         gclog_or_tty->print_cr("[%d] pushed %d entries to the global stack",
                                _task_id, n);
+      }
       statsOnly( int tmp_size = _cm->mark_stack_size();
-                 if (tmp_size > _global_max_size)
+                 if (tmp_size > _global_max_size) {
                    _global_max_size = tmp_size;
+                 }
                  _global_pushes += n );
     }
   }
@@ -3690,9 +3659,10 @@
     // yes, we did actually pop at least one entry
 
     statsOnly( ++_global_transfers_from; _global_pops += n );
-    if (_cm->verbose_medium())
+    if (_cm->verbose_medium()) {
       gclog_or_tty->print_cr("[%d] popped %d entries from the global stack",
                              _task_id, n);
+    }
     for (int i = 0; i < n; ++i) {
       bool success = _task_queue->push(buffer[i]);
       // We only call this when the local queue is empty or under a
@@ -3701,8 +3671,9 @@
     }
 
     statsOnly( int tmp_size = _task_queue->size();
-               if (tmp_size > _local_max_size)
+               if (tmp_size > _local_max_size) {
                  _local_max_size = tmp_size;
+               }
                _local_pushes += n );
   }
 
@@ -3711,31 +3682,33 @@
 }
 
 void CMTask::drain_local_queue(bool partially) {
-  if (has_aborted())
-    return;
+  if (has_aborted()) return;
 
   // Decide what the target size is, depending whether we're going to
   // drain it partially (so that other tasks can steal if they run out
   // of things to do) or totally (at the very end).
   size_t target_size;
-  if (partially)
+  if (partially) {
     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
-  else
+  } else {
     target_size = 0;
+  }
 
   if (_task_queue->size() > target_size) {
-    if (_cm->verbose_high())
+    if (_cm->verbose_high()) {
       gclog_or_tty->print_cr("[%d] draining local queue, target size = %d",
                              _task_id, target_size);
+    }
 
     oop obj;
     bool ret = _task_queue->pop_local(obj);
     while (ret) {
       statsOnly( ++_local_pops );
 
-      if (_cm->verbose_high())
+      if (_cm->verbose_high()) {
         gclog_or_tty->print_cr("[%d] popped "PTR_FORMAT, _task_id,
                                (void*) obj);
+      }
 
       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
       assert(!_g1h->is_on_master_free_list(
@@ -3743,21 +3716,22 @@
 
       scan_object(obj);
 
-      if (_task_queue->size() <= target_size || has_aborted())
+      if (_task_queue->size() <= target_size || has_aborted()) {
         ret = false;
-      else
+      } else {
         ret = _task_queue->pop_local(obj);
+      }
     }
 
-    if (_cm->verbose_high())
+    if (_cm->verbose_high()) {
       gclog_or_tty->print_cr("[%d] drained local queue, size = %d",
                              _task_id, _task_queue->size());
+    }
   }
 }
 
 void CMTask::drain_global_stack(bool partially) {
-  if (has_aborted())
-    return;
+  if (has_aborted()) return;
 
   // We have a policy to drain the local queue before we attempt to
   // drain the global stack.
@@ -3770,24 +3744,27 @@
   // because another task might be doing the same, we might in fact
   // drop below the target. But, this is not a problem.
   size_t target_size;
-  if (partially)
+  if (partially) {
     target_size = _cm->partial_mark_stack_size_target();
-  else
+  } else {
     target_size = 0;
+  }
 
   if (_cm->mark_stack_size() > target_size) {
-    if (_cm->verbose_low())
+    if (_cm->verbose_low()) {
       gclog_or_tty->print_cr("[%d] draining global_stack, target size %d",
                              _task_id, target_size);
+    }
 
     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
       get_entries_from_global_stack();
       drain_local_queue(partially);
     }
 
-    if (_cm->verbose_low())
+    if (_cm->verbose_low()) {
       gclog_or_tty->print_cr("[%d] drained global stack, size = %d",
                              _task_id, _cm->mark_stack_size());
+    }
   }
 }
 
@@ -3796,8 +3773,7 @@
 // replicated. We should really get rid of the single-threaded version
 // of the code to simplify things.
 void CMTask::drain_satb_buffers() {
-  if (has_aborted())
-    return;
+  if (has_aborted()) return;
 
   // We set this so that the regular clock knows that we're in the
   // middle of draining buffers and doesn't set the abort flag when it
@@ -3807,26 +3783,29 @@
 
   CMObjectClosure oc(this);
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
-  if (G1CollectedHeap::use_parallel_gc_threads())
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
     satb_mq_set.set_par_closure(_task_id, &oc);
-  else
+  } else {
     satb_mq_set.set_closure(&oc);
+  }
 
   // This keeps claiming and applying the closure to completed buffers
   // until we run out of buffers or we need to abort.
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     while (!has_aborted() &&
            satb_mq_set.par_apply_closure_to_completed_buffer(_task_id)) {
-      if (_cm->verbose_medium())
+      if (_cm->verbose_medium()) {
         gclog_or_tty->print_cr("[%d] processed an SATB buffer", _task_id);
+      }
       statsOnly( ++_satb_buffers_processed );
       regular_clock_call();
     }
   } else {
     while (!has_aborted() &&
            satb_mq_set.apply_closure_to_completed_buffer()) {
-      if (_cm->verbose_medium())
+      if (_cm->verbose_medium()) {
         gclog_or_tty->print_cr("[%d] processed an SATB buffer", _task_id);
+      }
       statsOnly( ++_satb_buffers_processed );
       regular_clock_call();
     }
@@ -3834,10 +3813,11 @@
 
   if (!concurrent() && !has_aborted()) {
     // We should only do this during remark.
-    if (G1CollectedHeap::use_parallel_gc_threads())
+    if (G1CollectedHeap::use_parallel_gc_threads()) {
       satb_mq_set.par_iterate_closure_all_threads(_task_id);
-    else
+    } else {
       satb_mq_set.iterate_closure_all_threads();
+    }
   }
 
   _draining_satb_buffers = false;
@@ -3846,10 +3826,11 @@
          concurrent() ||
          satb_mq_set.completed_buffers_num() == 0, "invariant");
 
-  if (G1CollectedHeap::use_parallel_gc_threads())
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
     satb_mq_set.set_par_closure(_task_id, NULL);
-  else
+  } else {
     satb_mq_set.set_closure(NULL);
+  }
 
   // again, this was a potentially expensive operation, decrease the
   // limits to get the regular clock call early
@@ -3857,16 +3838,16 @@
 }
 
 void CMTask::drain_region_stack(BitMapClosure* bc) {
-  if (has_aborted())
-    return;
+  if (has_aborted()) return;
 
   assert(_region_finger == NULL,
          "it should be NULL when we're not scanning a region");
 
   if (!_cm->region_stack_empty() || !_aborted_region.is_empty()) {
-    if (_cm->verbose_low())
+    if (_cm->verbose_low()) {
       gclog_or_tty->print_cr("[%d] draining region stack, size = %d",
                              _task_id, _cm->region_stack_size());
+    }
 
     MemRegion mr;
 
@@ -3874,9 +3855,11 @@
       mr = _aborted_region;
       _aborted_region = MemRegion();
 
-      if (_cm->verbose_low())
-        gclog_or_tty->print_cr("[%d] scanning aborted region [ " PTR_FORMAT ", " PTR_FORMAT " )",
-                             _task_id, mr.start(), mr.end());
+      if (_cm->verbose_low()) {
+        gclog_or_tty->print_cr("[%d] scanning aborted region "
+                               "[ " PTR_FORMAT ", " PTR_FORMAT " )",
+                               _task_id, mr.start(), mr.end());
+      }
     } else {
       mr = _cm->region_stack_pop_lock_free();
       // it returns MemRegion() if the pop fails
@@ -3884,10 +3867,11 @@
     }
 
     while (mr.start() != NULL) {
-      if (_cm->verbose_medium())
+      if (_cm->verbose_medium()) {
         gclog_or_tty->print_cr("[%d] we are scanning region "
                                "["PTR_FORMAT", "PTR_FORMAT")",
                                _task_id, mr.start(), mr.end());
+      }
 
       assert(mr.end() <= _cm->finger(),
              "otherwise the region shouldn't be on the stack");
@@ -3898,9 +3882,9 @@
 
         // We finished iterating over the region without aborting.
         regular_clock_call();
-        if (has_aborted())
+        if (has_aborted()) {
           mr = MemRegion();
-        else {
+        } else {
           mr = _cm->region_stack_pop_lock_free();
           // it returns MemRegion() if the pop fails
           statsOnly(if (mr.start() != NULL) ++_region_stack_pops );
@@ -3946,9 +3930,10 @@
       _region_finger = NULL;
     }
 
-    if (_cm->verbose_low())
+    if (_cm->verbose_low()) {
       gclog_or_tty->print_cr("[%d] drained region stack, size = %d",
                              _task_id, _cm->region_stack_size());
+    }
   }
 }
 
@@ -4149,17 +4134,18 @@
 
   ++_calls;
 
-  if (_cm->verbose_low())
+  if (_cm->verbose_low()) {
     gclog_or_tty->print_cr("[%d] >>>>>>>>>> START, call = %d, "
                            "target = %1.2lfms >>>>>>>>>>",
                            _task_id, _calls, _time_target_ms);
+  }
 
   // Set up the bitmap and oop closures. Anything that uses them is
   // eventually called from this method, so it is OK to allocate these
   // statically.
   CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
-  CMOopClosure    oop_closure(_g1h, _cm, this);
-  set_oop_closure(&oop_closure);
+  G1CMOopClosure  cm_oop_closure(_g1h, _cm, this);
+  set_cm_oop_closure(&cm_oop_closure);
 
   if (_cm->has_overflown()) {
     // This can happen if the region stack or the mark stack overflows
@@ -4209,11 +4195,12 @@
       // fresh region, _finger points to start().
       MemRegion mr = MemRegion(_finger, _region_limit);
 
-      if (_cm->verbose_low())
+      if (_cm->verbose_low()) {
         gclog_or_tty->print_cr("[%d] we're scanning part "
                                "["PTR_FORMAT", "PTR_FORMAT") "
                                "of region "PTR_FORMAT,
                                _task_id, _finger, _region_limit, _curr_region);
+      }
 
       // Let's iterate over the bitmap of the part of the
       // region that is left.
@@ -4269,17 +4256,19 @@
       assert(_curr_region  == NULL, "invariant");
       assert(_finger       == NULL, "invariant");
       assert(_region_limit == NULL, "invariant");
-      if (_cm->verbose_low())
+      if (_cm->verbose_low()) {
         gclog_or_tty->print_cr("[%d] trying to claim a new region", _task_id);
+      }
       HeapRegion* claimed_region = _cm->claim_region(_task_id);
       if (claimed_region != NULL) {
         // Yes, we managed to claim one
         statsOnly( ++_regions_claimed );
 
-        if (_cm->verbose_low())
+        if (_cm->verbose_low()) {
           gclog_or_tty->print_cr("[%d] we successfully claimed "
                                  "region "PTR_FORMAT,
                                  _task_id, claimed_region);
+        }
 
         setup_for_region(claimed_region);
         assert(_curr_region == claimed_region, "invariant");
@@ -4306,8 +4295,9 @@
     assert(_cm->out_of_regions(),
            "at this point we should be out of regions");
 
-    if (_cm->verbose_low())
+    if (_cm->verbose_low()) {
       gclog_or_tty->print_cr("[%d] all regions claimed", _task_id);
+    }
 
     // Try to reduce the number of available SATB buffers so that
     // remark has less work to do.
@@ -4331,17 +4321,19 @@
     assert(_cm->out_of_regions() && _task_queue->size() == 0,
            "only way to reach here");
 
-    if (_cm->verbose_low())
+    if (_cm->verbose_low()) {
       gclog_or_tty->print_cr("[%d] starting to steal", _task_id);
+    }
 
     while (!has_aborted()) {
       oop obj;
       statsOnly( ++_steal_attempts );
 
       if (_cm->try_stealing(_task_id, &_hash_seed, obj)) {
-        if (_cm->verbose_medium())
+        if (_cm->verbose_medium()) {
           gclog_or_tty->print_cr("[%d] stolen "PTR_FORMAT" successfully",
                                  _task_id, (void*) obj);
+        }
 
         statsOnly( ++_steals );
 
@@ -4379,8 +4371,9 @@
     assert(_cm->out_of_regions(), "only way to reach here");
     assert(_task_queue->size() == 0, "only way to reach here");
 
-    if (_cm->verbose_low())
+    if (_cm->verbose_low()) {
       gclog_or_tty->print_cr("[%d] starting termination protocol", _task_id);
+    }
 
     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
     // The CMTask class also extends the TerminatorTerminator class,
@@ -4418,14 +4411,17 @@
       guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
       guarantee(!_cm->region_stack_overflow(), "only way to reach here");
 
-      if (_cm->verbose_low())
+      if (_cm->verbose_low()) {
         gclog_or_tty->print_cr("[%d] all tasks terminated", _task_id);
+      }
     } else {
       // Apparently there's more work to do. Let's abort this task. It
       // will restart it and we can hopefully find more things to do.
 
-      if (_cm->verbose_low())
-        gclog_or_tty->print_cr("[%d] apparently there is more work to do", _task_id);
+      if (_cm->verbose_low()) {
+        gclog_or_tty->print_cr("[%d] apparently there is more work to do",
+                               _task_id);
+      }
 
       set_has_aborted();
       statsOnly( ++_aborted_termination );
@@ -4435,7 +4431,7 @@
   // Mainly for debugging purposes to make sure that a pointer to the
   // closure which was statically allocated in this frame doesn't
   // escape it by accident.
-  set_oop_closure(NULL);
+  set_cm_oop_closure(NULL);
   double end_time_ms = os::elapsedVTime() * 1000.0;
   double elapsed_time_ms = end_time_ms - _start_time_ms;
   // Update the step history.
@@ -4462,8 +4458,9 @@
       // what they are doing and re-initialise in a safe manner. We
       // will achieve this with the use of two barrier sync points.
 
-      if (_cm->verbose_low())
+      if (_cm->verbose_low()) {
         gclog_or_tty->print_cr("[%d] detected overflow", _task_id);
+      }
 
       _cm->enter_first_sync_barrier(_task_id);
       // When we exit this sync barrier we know that all tasks have
@@ -4486,15 +4483,17 @@
       gclog_or_tty->print_cr("[%d] <<<<<<<<<< ABORTING, target = %1.2lfms, "
                              "elapsed = %1.2lfms <<<<<<<<<<",
                              _task_id, _time_target_ms, elapsed_time_ms);
-      if (_cm->has_aborted())
+      if (_cm->has_aborted()) {
         gclog_or_tty->print_cr("[%d] ========== MARKING ABORTED ==========",
                                _task_id);
+      }
     }
   } else {
-    if (_cm->verbose_low())
+    if (_cm->verbose_low()) {
       gclog_or_tty->print_cr("[%d] <<<<<<<<<< FINISHED, target = %1.2lfms, "
                              "elapsed = %1.2lfms <<<<<<<<<<",
                              _task_id, _time_target_ms, elapsed_time_ms);
+    }
   }
 
   _claimed = false;
@@ -4510,7 +4509,7 @@
     _nextMarkBitMap(NULL), _hash_seed(17),
     _task_queue(task_queue),
     _task_queues(task_queues),
-    _oop_closure(NULL),
+    _cm_oop_closure(NULL),
     _aborted_region(MemRegion()) {
   guarantee(task_queue != NULL, "invariant");
   guarantee(task_queues != NULL, "invariant");
@@ -4574,7 +4573,7 @@
                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
                  g1_committed.start(), g1_committed.end(),
                  g1_reserved.start(), g1_reserved.end(),
-                 HeapRegion::GrainBytes);
+                 (size_t)HeapRegion::GrainBytes);
   _out->print_cr(G1PPRL_LINE_PREFIX);
   _out->print_cr(G1PPRL_LINE_PREFIX
                  G1PPRL_TYPE_H_FORMAT
@@ -4585,6 +4584,15 @@
                  G1PPRL_DOUBLE_H_FORMAT,
                  "type", "address-range",
                  "used", "prev-live", "next-live", "gc-eff");
+  _out->print_cr(G1PPRL_LINE_PREFIX
+                 G1PPRL_TYPE_H_FORMAT
+                 G1PPRL_ADDR_BASE_H_FORMAT
+                 G1PPRL_BYTE_H_FORMAT
+                 G1PPRL_BYTE_H_FORMAT
+                 G1PPRL_BYTE_H_FORMAT
+                 G1PPRL_DOUBLE_H_FORMAT,
+                 "", "",
+                 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)");
 }
 
 // It takes as a parameter a reference to one of the _hum_* fields, it
--- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -131,22 +131,22 @@
   void mark(HeapWord* addr) {
     assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
            "outside underlying space?");
-    _bm.at_put(heapWordToOffset(addr), true);
+    _bm.set_bit(heapWordToOffset(addr));
   }
   void clear(HeapWord* addr) {
     assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
            "outside underlying space?");
-    _bm.at_put(heapWordToOffset(addr), false);
+    _bm.clear_bit(heapWordToOffset(addr));
   }
   bool parMark(HeapWord* addr) {
     assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
            "outside underlying space?");
-    return _bm.par_at_put(heapWordToOffset(addr), true);
+    return _bm.par_set_bit(heapWordToOffset(addr));
   }
   bool parClear(HeapWord* addr) {
     assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
            "outside underlying space?");
-    return _bm.par_at_put(heapWordToOffset(addr), false);
+    return _bm.par_clear_bit(heapWordToOffset(addr));
   }
   void markRange(MemRegion mr);
   void clearAll();
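
The hunk above swaps generic at_put()/par_at_put() calls for dedicated set/clear bit operations, keyed by a per-heap-word offset from the start of the covered space. A tiny single-threaded sketch of that address-to-bit mapping; HeapWord_t and the vector<bool> bitmap are stand-ins, and the par_set_bit/par_clear_bit CAS variants are omitted.

    #include <cassert>
    #include <cstddef>
    #include <vector>

    typedef unsigned long HeapWord_t;            // illustrative stand-in

    struct MarkBitMap {
      const HeapWord_t* start;                   // first covered word
      std::vector<bool> bits;                    // one bit per covered word

      size_t word_to_offset(const HeapWord_t* addr) const {
        assert(addr >= start && (size_t)(addr - start) < bits.size());
        return (size_t)(addr - start);
      }
      void mark(const HeapWord_t* addr)  { bits[word_to_offset(addr)] = true;  }
      void clear(const HeapWord_t* addr) { bits[word_to_offset(addr)] = false; }
      bool is_marked(const HeapWord_t* addr) const {
        return bits[word_to_offset(addr)];
      }
    };

    int main() {
      static HeapWord_t heap[64];
      MarkBitMap bm{heap, std::vector<bool>(64, false)};
      bm.mark(&heap[5]);
      assert(bm.is_marked(&heap[5]) && !bm.is_marked(&heap[6]));
      return 0;
    }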
@@ -366,8 +366,8 @@
   friend class CMConcurrentMarkingTask;
   friend class G1ParNoteEndTask;
   friend class CalcLiveObjectsClosure;
-  friend class G1RefProcTaskProxy;
-  friend class G1RefProcTaskExecutor;
+  friend class G1CMRefProcTaskProxy;
+  friend class G1CMRefProcTaskExecutor;
   friend class G1CMParKeepAliveAndDrainClosure;
   friend class G1CMParDrainMarkingStackClosure;
 
@@ -605,10 +605,10 @@
   void mark_stack_pop(oop* arr, int max, int* n) {
     _markStack.par_pop_arr(arr, max, n);
   }
-  size_t mark_stack_size()              { return _markStack.size(); }
+  size_t mark_stack_size()                { return _markStack.size(); }
   size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
-  bool mark_stack_overflow()            { return _markStack.overflow(); }
-  bool mark_stack_empty()               { return _markStack.isEmpty(); }
+  bool mark_stack_overflow()              { return _markStack.overflow(); }
+  bool mark_stack_empty()                 { return _markStack.isEmpty(); }
 
   // (Lock-free) Manipulation of the region stack
   bool region_stack_push_lock_free(MemRegion mr) {
@@ -736,12 +736,14 @@
   // will dump the contents of its reference fields, as well as
   // liveness information for the object and its referents. The dump
   // will be written to a file with the following name:
-  // G1PrintReachableBaseFile + "." + str. use_prev_marking decides
-  // whether the prev (use_prev_marking == true) or next
-  // (use_prev_marking == false) marking information will be used to
-  // determine the liveness of each object / referent. If all is true,
-  // all objects in the heap will be dumped, otherwise only the live
-  // ones. In the dump the following symbols / abbreviations are used:
+  // G1PrintReachableBaseFile + "." + str.
+  // vo decides whether the prev (vo == UsePrevMarking), the next
+  // (vo == UseNextMarking) marking information, or the mark word
+  // (vo == UseMarkWord) will be used to determine the liveness of
+  // each object / referent.
+  // If all is true, all objects in the heap will be dumped, otherwise
+  // only the live ones. In the dump the following symbols / abbreviations
+  // are used:
   //   M : an explicitly live object (its bitmap bit is set)
   //   > : an implicitly live object (over tams)
   //   O : an object outside the G1 heap (typically: in the perm gen)
@@ -749,14 +751,11 @@
   //   AND MARKED : indicates that an object is both explicitly and
   //   implicitly live (it should be one or the other, not both)
   void print_reachable(const char* str,
-                       bool use_prev_marking, bool all) PRODUCT_RETURN;
+                       VerifyOption vo, bool all) PRODUCT_RETURN;
 
   // Clear the next marking bitmap (will be called concurrently).
   void clearNextBitmap();
 
-  // main CMS steps and related support
-  void checkpointRootsInitial();
-
   // These two do the work that needs to be done before and after the
   // initial root checkpoint. Since this checkpoint can be done at two
   // different points (i.e. an explicit pause or piggy-backed on a
@@ -831,8 +830,9 @@
     // _min_finger then we need to gray objects.
     // This routine is like registerCSetRegion but for an entire
     // collection of regions.
-    if (max_finger > _min_finger)
+    if (max_finger > _min_finger) {
       _should_gray_objects = true;
+    }
   }
 
   // Returns "true" if at least one mark has been completed.
@@ -878,14 +878,18 @@
   // The following indicate whether a given verbose level has been
   // set. Notice that anything above stats is conditional to
   // _MARKING_VERBOSE_ having been set to 1
-  bool verbose_stats()
-    { return _verbose_level >= stats_verbose; }
-  bool verbose_low()
-    { return _MARKING_VERBOSE_ && _verbose_level >= low_verbose; }
-  bool verbose_medium()
-    { return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose; }
-  bool verbose_high()
-    { return _MARKING_VERBOSE_ && _verbose_level >= high_verbose; }
+  bool verbose_stats() {
+    return _verbose_level >= stats_verbose;
+  }
+  bool verbose_low() {
+    return _MARKING_VERBOSE_ && _verbose_level >= low_verbose;
+  }
+  bool verbose_medium() {
+    return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose;
+  }
+  bool verbose_high() {
+    return _MARKING_VERBOSE_ && _verbose_level >= high_verbose;
+  }
 };
 
 // A class representing a marking task.
@@ -928,7 +932,7 @@
   double                      _start_time_ms;
 
   // the oop closure used for iterations over oops
-  OopClosure*                 _oop_closure;
+  G1CMOopClosure*             _cm_oop_closure;
 
   // the region this task is scanning, NULL if we're not scanning any
   HeapRegion*                 _curr_region;
@@ -1061,8 +1065,9 @@
   // respective limit and calls reached_limit() if they have
   void check_limits() {
     if (_words_scanned >= _words_scanned_limit ||
-        _refs_reached >= _refs_reached_limit)
+        _refs_reached >= _refs_reached_limit) {
       reached_limit();
+    }
   }
   // this is supposed to be called regularly during a marking step as
   // it checks a bunch of conditions that might cause the marking step
@@ -1122,32 +1127,17 @@
   // Clears any recorded partially scanned region
   void clear_aborted_region()   { set_aborted_region(MemRegion()); }
 
-  void set_oop_closure(OopClosure* oop_closure) {
-    _oop_closure = oop_closure;
-  }
+  void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);
 
   // It grays the object by marking it and, if necessary, pushing it
   // on the local queue
-  void deal_with_reference(oop obj);
+  inline void deal_with_reference(oop obj);
 
   // It scans an object and visits its children.
-  void scan_object(oop obj) {
-    assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
-
-    if (_cm->verbose_high())
-      gclog_or_tty->print_cr("[%d] we're scanning object "PTR_FORMAT,
-                             _task_id, (void*) obj);
-
-    size_t obj_size = obj->size();
-    _words_scanned += obj_size;
-
-    obj->oop_iterate(_oop_closure);
-    statsOnly( ++_objs_scanned );
-    check_limits();
-  }
+  void scan_object(oop obj);
 
   // It pushes an object on the local queue.
-  void push(oop obj);
+  inline void push(oop obj);
 
   // These two move entries to/from the global stack.
   void move_entries_to_global_stack();
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP
+
+#include "gc_implementation/g1/concurrentMark.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+
+inline void CMTask::push(oop obj) {
+  HeapWord* objAddr = (HeapWord*) obj;
+  assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
+  assert(!_g1h->is_on_master_free_list(
+              _g1h->heap_region_containing((HeapWord*) objAddr)), "invariant");
+  assert(!_g1h->is_obj_ill(obj), "invariant");
+  assert(_nextMarkBitMap->isMarked(objAddr), "invariant");
+
+  if (_cm->verbose_high()) {
+    gclog_or_tty->print_cr("[%d] pushing "PTR_FORMAT, _task_id, (void*) obj);
+  }
+
+  if (!_task_queue->push(obj)) {
+    // The local task queue looks full. We need to push some entries
+    // to the global stack.
+
+    if (_cm->verbose_medium()) {
+      gclog_or_tty->print_cr("[%d] task queue overflow, "
+                             "moving entries to the global stack",
+                             _task_id);
+    }
+    move_entries_to_global_stack();
+
+    // this should succeed since, even if we overflow the global
+    // stack, we should have definitely removed some entries from the
+    // local queue. So, there must be space on it.
+    bool success = _task_queue->push(obj);
+    assert(success, "invariant");
+  }
+
+  statsOnly( int tmp_size = _task_queue->size();
+             if (tmp_size > _local_max_size) {
+               _local_max_size = tmp_size;
+             }
+             ++_local_pushes );
+}
+
+// This determines whether the method below will check both the local
+// and global fingers when determining whether to push on the stack a
+// gray object (value 1) or whether it will only check the global one
+// (value 0). The tradeoffs are that the former will be a bit more
+// accurate and possibly push less on the stack, but it might also be
+// a little bit slower.
+
+#define _CHECK_BOTH_FINGERS_      1
+
+inline void CMTask::deal_with_reference(oop obj) {
+  if (_cm->verbose_high()) {
+    gclog_or_tty->print_cr("[%d] we're dealing with reference = "PTR_FORMAT,
+                           _task_id, (void*) obj);
+  }
+
+  ++_refs_reached;
+
+  HeapWord* objAddr = (HeapWord*) obj;
+  assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
+  if (_g1h->is_in_g1_reserved(objAddr)) {
+    assert(obj != NULL, "null check is implicit");
+    if (!_nextMarkBitMap->isMarked(objAddr)) {
+      // Only get the containing region if the object is not marked on the
+      // bitmap (otherwise, it's a waste of time since we won't do
+      // anything with it).
+      HeapRegion* hr = _g1h->heap_region_containing_raw(obj);
+      if (!hr->obj_allocated_since_next_marking(obj)) {
+        if (_cm->verbose_high()) {
+          gclog_or_tty->print_cr("[%d] "PTR_FORMAT" is not considered marked",
+                                 _task_id, (void*) obj);
+        }
+
+        // we need to mark it first
+        if (_nextMarkBitMap->parMark(objAddr)) {
+          // No OrderAccess:store_load() is needed. It is implicit in the
+          // CAS done in parMark(objAddr) above
+          HeapWord* global_finger = _cm->finger();
+
+#if _CHECK_BOTH_FINGERS_
+          // we will check both the local and global fingers
+
+          if (_finger != NULL && objAddr < _finger) {
+            if (_cm->verbose_high()) {
+              gclog_or_tty->print_cr("[%d] below the local finger ("PTR_FORMAT"), "
+                                     "pushing it", _task_id, _finger);
+            }
+            push(obj);
+          } else if (_curr_region != NULL && objAddr < _region_limit) {
+            // do nothing
+          } else if (objAddr < global_finger) {
+            // Notice that the global finger might be moving forward
+            // concurrently. This is not a problem. In the worst case, we
+            // mark the object while it is above the global finger and, by
+            // the time we read the global finger, it has moved forward
+            // past this object. In this case, the object will probably
+            // be visited when a task is scanning the region and will also
+            // be pushed on the stack. So, some duplicate work, but no
+            // correctness problems.
+
+            if (_cm->verbose_high()) {
+              gclog_or_tty->print_cr("[%d] below the global finger "
+                                     "("PTR_FORMAT"), pushing it",
+                                     _task_id, global_finger);
+            }
+            push(obj);
+          } else {
+            // do nothing
+          }
+#else // _CHECK_BOTH_FINGERS_
+          // we will only check the global finger
+
+          if (objAddr < global_finger) {
+            // see long comment above
+
+            if (_cm->verbose_high()) {
+              gclog_or_tty->print_cr("[%d] below the global finger "
+                                     "("PTR_FORMAT"), pushing it",
+                                     _task_id, global_finger);
+            }
+            push(obj);
+          }
+#endif // _CHECK_BOTH_FINGERS_
+        }
+      }
+    }
+  }
+}
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP
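
The _CHECK_BOTH_FINGERS_ code above decides whether a freshly marked object still needs to be pushed on the local queue, based on where its address falls relative to the task-local finger, the limit of the region being scanned, and the global finger. The following self-contained sketch replays that decision with plain integer addresses; Marker and the addresses used in main() are illustrative, not HotSpot types.

    #include <cstdint>
    #include <deque>
    #include <iostream>

    struct Marker {
      uintptr_t local_finger;            // this task's scan position (0 == none)
      uintptr_t region_limit;            // end of region being scanned (0 == none)
      uintptr_t global_finger;           // global claim position
      std::deque<uintptr_t> queue;       // stand-in for the local task queue

      // Push the object only if no scan is guaranteed to reach it otherwise.
      void deal_with(uintptr_t addr) {
        if (local_finger != 0 && addr < local_finger) {
          queue.push_back(addr);         // already behind our own scan point
        } else if (region_limit != 0 && addr < region_limit) {
          // inside the region we are scanning: the bitmap scan will visit it
        } else if (addr < global_finger) {
          queue.push_back(addr);         // behind the global claim point
        }
        // otherwise some task will eventually claim its region and scan it
      }
    };

    int main() {
      Marker m{0x2000, 0x3000, 0x8000, {}};
      m.deal_with(0x1800);               // below the local finger   -> pushed
      m.deal_with(0x2800);               // inside the current region -> skipped
      m.deal_with(0x5000);               // below the global finger  -> pushed
      m.deal_with(0x9000);               // above everything         -> skipped
      std::cout << "pushed " << m.queue.size() << " objects\n";   // prints 2
      return 0;
    }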
--- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -50,19 +50,6 @@
   create_and_start();
 }
 
-class CMCheckpointRootsInitialClosure: public VoidClosure {
-
-  ConcurrentMark* _cm;
-public:
-
-  CMCheckpointRootsInitialClosure(ConcurrentMark* cm) :
-    _cm(cm) {}
-
-  void do_void(){
-    _cm->checkpointRootsInitial();
-  }
-};
-
 class CMCheckpointRootsFinalClosure: public VoidClosure {
 
   ConcurrentMark* _cm;
@@ -116,27 +103,6 @@
         gclog_or_tty->print_cr("[GC concurrent-mark-start]");
       }
 
-      if (!g1_policy->in_young_gc_mode()) {
-        // this ensures the flag is not set if we bail out of the marking
-        // cycle; normally the flag is cleared immediately after cleanup
-        g1h->set_marking_complete();
-
-        if (g1_policy->adaptive_young_list_length()) {
-          double now = os::elapsedTime();
-          double init_prediction_ms = g1_policy->predict_init_time_ms();
-          jlong sleep_time_ms = mmu_tracker->when_ms(now, init_prediction_ms);
-          os::sleep(current_thread, sleep_time_ms, false);
-        }
-
-        // We don't have to skip here if we've been asked to restart, because
-        // in the worst case we just enqueue a new VM operation to start a
-        // marking.  Note that the init operation resets has_aborted()
-        CMCheckpointRootsInitialClosure init_cl(_cm);
-        strcpy(verbose_str, "GC initial-mark");
-        VM_CGC_Operation op(&init_cl, verbose_str);
-        VMThread::execute(&op);
-      }
-
       int iter = 0;
       do {
         iter++;
--- a/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -129,6 +129,7 @@
     // region in _alloc_region. This is the reason why an active region
     // can never be empty.
     _alloc_region = new_alloc_region;
+    _count += 1;
     trace("region allocation successful");
     return result;
   } else {
@@ -139,8 +140,8 @@
 }
 
 void G1AllocRegion::fill_in_ext_msg(ar_ext_msg* msg, const char* message) {
-  msg->append("[%s] %s b: %s r: "PTR_FORMAT" u: "SIZE_FORMAT,
-              _name, message, BOOL_TO_STR(_bot_updates),
+  msg->append("[%s] %s c: "SIZE_FORMAT" b: %s r: "PTR_FORMAT" u: "SIZE_FORMAT,
+              _name, message, _count, BOOL_TO_STR(_bot_updates),
               _alloc_region, _used_bytes_before);
 }
 
@@ -148,16 +149,34 @@
   trace("initializing");
   assert(_alloc_region == NULL && _used_bytes_before == 0,
          ar_ext_msg(this, "pre-condition"));
-  assert(_dummy_region != NULL, "should have been set");
+  assert(_dummy_region != NULL, ar_ext_msg(this, "should have been set"));
   _alloc_region = _dummy_region;
+  _count = 0;
   trace("initialized");
 }
 
+void G1AllocRegion::set(HeapRegion* alloc_region) {
+  trace("setting");
+  // We explicitly check that the region is not empty to make sure we
+  // maintain the "the alloc region cannot be empty" invariant.
+  assert(alloc_region != NULL && !alloc_region->is_empty(),
+         ar_ext_msg(this, "pre-condition"));
+  assert(_alloc_region == _dummy_region &&
+         _used_bytes_before == 0 && _count == 0,
+         ar_ext_msg(this, "pre-condition"));
+
+  _used_bytes_before = alloc_region->used();
+  _alloc_region = alloc_region;
+  _count += 1;
+  trace("set");
+}
+
 HeapRegion* G1AllocRegion::release() {
   trace("releasing");
   HeapRegion* alloc_region = _alloc_region;
   retire(false /* fill_up */);
-  assert(_alloc_region == _dummy_region, "post-condition of retire()");
+  assert(_alloc_region == _dummy_region,
+         ar_ext_msg(this, "post-condition of retire()"));
   _alloc_region = NULL;
   trace("released");
   return (alloc_region == _dummy_region) ? NULL : alloc_region;
@@ -196,7 +215,8 @@
       jio_snprintf(rest_buffer, buffer_length, "");
     }
 
-    tty->print_cr("[%s] %s : %s %s", _name, hr_buffer, str, rest_buffer);
+    tty->print_cr("[%s] "SIZE_FORMAT" %s : %s %s",
+                  _name, _count, hr_buffer, str, rest_buffer);
   }
 }
 #endif // G1_ALLOC_REGION_TRACING
@@ -204,5 +224,5 @@
 G1AllocRegion::G1AllocRegion(const char* name,
                              bool bot_updates)
   : _name(name), _bot_updates(bot_updates),
-    _alloc_region(NULL), _used_bytes_before(0) { }
+    _alloc_region(NULL), _count(0), _used_bytes_before(0) { }
 
--- a/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -36,7 +36,7 @@
 
 // A class that holds a region that is active in satisfying allocation
 // requests, potentially issued in parallel. When the active region is
-// full it will be retired it replaced with a new one. The
+// full it will be retired and replaced with a new one. The
 // implementation assumes that fast-path allocations will be lock-free
 // and a lock will need to be taken when the active region needs to be
 // replaced.
@@ -57,13 +57,22 @@
   // correct use of init() and release()).
   HeapRegion* _alloc_region;
 
+  // It keeps track of the number of distinct regions that are used
+  // for allocation in the active interval of this object, i.e.,
+  // between a call to init() and a call to release(). The count
+  // mostly includes regions that are freshly allocated, as well as
+  // the region that is re-used using the set() method. This count can
+  // be used in any heuristics that might want to bound how many
+  // distinct regions this object can use during an active interval.
+  size_t _count;
+
   // When we set up a new active region we save its used bytes in this
   // field so that, when we retire it, we can calculate how much space
   // we allocated in it.
   size_t _used_bytes_before;
 
-  // Specifies whether the allocate calls will do BOT updates or not.
-  bool _bot_updates;
+  // When true, indicates that allocate calls should do BOT updates.
+  const bool _bot_updates;
 
   // Useful for debugging and tracing.
   const char* _name;
@@ -127,6 +136,8 @@
     return (_alloc_region == _dummy_region) ? NULL : _alloc_region;
   }
 
+  size_t count() { return _count; }
+
   // The following two are the building blocks for the allocation method.
 
   // First-level allocation: Should be called without holding a
@@ -153,6 +164,12 @@
   // Should be called before we start using this object.
   void init();
 
+  // This can be used to set the active region to a specific
+  // region. (Use Example: we try to retain the last old GC alloc
+  // region that we've used during a GC and we can use set() to
+  // re-instate it at the beginning of the next GC.)
+  void set(HeapRegion* alloc_region);
+
   // Should be called when we want to release the active region which
   // is returned after it's been retired.
   HeapRegion* release();
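
To make the new _count field and set() entry point above easier to follow, here is a much-reduced sketch of the init()/set()/release() lifecycle. Region, AllocRegion and the dummy sentinel are illustrative stand-ins, and the retire()/fill-up handling is left out.

    #include <cassert>
    #include <cstddef>

    struct Region {
      size_t used_bytes = 0;
      bool empty() const { return used_bytes == 0; }
    };

    class AllocRegion {
      Region* _alloc_region = nullptr;   // never NULL while active
      size_t  _count = 0;                // distinct regions used since init()
      size_t  _used_bytes_before = 0;
      static Region _dummy;              // sentinel installed by init()

    public:
      void init() {
        assert(_alloc_region == nullptr && _used_bytes_before == 0);
        _alloc_region = &_dummy;
        _count = 0;
      }
      // Re-instate a retained, non-empty region as the active one
      // (a freshly allocated region would bump _count the same way).
      void set(Region* r) {
        assert(r != nullptr && !r->empty());
        assert(_alloc_region == &_dummy && _count == 0);
        _used_bytes_before = r->used_bytes;
        _alloc_region = r;
        _count += 1;
      }
      Region* release() {
        Region* r = _alloc_region;
        _alloc_region = nullptr;
        _used_bytes_before = 0;
        return (r == &_dummy) ? nullptr : r;
      }
      size_t count() const { return _count; }
    };

    Region AllocRegion::_dummy;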
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -31,6 +31,7 @@
 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/g1ErgoVerbose.hpp"
 #include "gc_implementation/g1/g1MarkSweep.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/g1RemSet.inline.hpp"
@@ -41,6 +42,7 @@
 #include "memory/gcLocker.inline.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/generationSpec.hpp"
+#include "memory/referenceProcessor.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/oop.pcgc.inline.hpp"
 #include "runtime/aprofiler.hpp"
@@ -577,43 +579,27 @@
     res = new_region_try_secondary_free_list();
   }
   if (res == NULL && do_expand) {
+    ergo_verbose1(ErgoHeapSizing,
+                  "attempt heap expansion",
+                  ergo_format_reason("region allocation request failed")
+                  ergo_format_byte("allocation request"),
+                  word_size * HeapWordSize);
     if (expand(word_size * HeapWordSize)) {
-      // The expansion succeeded and so we should have at least one
-      // region on the free list.
-      res = _free_list.remove_head();
-    }
-  }
-  if (res != NULL) {
-    if (G1PrintHeapRegions) {
-      gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT","PTR_FORMAT"], "
-                             "top "PTR_FORMAT, res->hrs_index(),
-                             res->bottom(), res->end(), res->top());
+      // Even though the heap was expanded, it might not have reached
+      // the desired size. So, we cannot assume that the allocation
+      // will succeed.
+      res = _free_list.remove_head_or_null();
     }
   }
   return res;
 }
 
-HeapRegion* G1CollectedHeap::new_gc_alloc_region(int purpose,
-                                                 size_t word_size) {
-  HeapRegion* alloc_region = NULL;
-  if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
-    alloc_region = new_region(word_size, true /* do_expand */);
-    if (purpose == GCAllocForSurvived && alloc_region != NULL) {
-      alloc_region->set_survivor();
-    }
-    ++_gc_alloc_region_counts[purpose];
-  } else {
-    g1_policy()->note_alloc_region_limit_reached(purpose);
-  }
-  return alloc_region;
-}
-
-int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
-                                                       size_t word_size) {
+size_t G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
+                                                          size_t word_size) {
   assert(isHumongous(word_size), "word_size should be humongous");
   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 
-  int first = -1;
+  size_t first = G1_NULL_HRS_INDEX;
   if (num_regions == 1) {
     // Only one region to allocate, no need to go through the slower
     // path. The caller will attempt the expansion if this fails, so
@@ -622,7 +608,7 @@
     if (hr != NULL) {
       first = hr->hrs_index();
     } else {
-      first = -1;
+      first = G1_NULL_HRS_INDEX;
     }
   } else {
     // We can't allocate humongous regions while cleanupComplete() is
@@ -637,10 +623,10 @@
     append_secondary_free_list_if_not_empty_with_lock();
 
     if (free_regions() >= num_regions) {
-      first = _hrs->find_contiguous(num_regions);
-      if (first != -1) {
-        for (int i = first; i < first + (int) num_regions; ++i) {
-          HeapRegion* hr = _hrs->at(i);
+      first = _hrs.find_contiguous(num_regions);
+      if (first != G1_NULL_HRS_INDEX) {
+        for (size_t i = first; i < first + num_regions; ++i) {
+          HeapRegion* hr = region_at(i);
           assert(hr->is_empty(), "sanity");
           assert(is_on_master_free_list(hr), "sanity");
           hr->set_pending_removal(true);
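
The hunk above changes humongous_obj_allocate_find_first() from an int index with -1 meaning "not found" to a size_t index with a G1_NULL_HRS_INDEX sentinel. A toy version of the "first run of num contiguous free regions" search it performs could look like the sketch below; the vector<bool> free-map and the NULL_INDEX name are assumptions made only for illustration.

    #include <cstddef>
    #include <vector>

    const size_t NULL_INDEX = (size_t)-1;        // stand-in for G1_NULL_HRS_INDEX

    // Return the index of the first run of 'num' contiguous free region
    // slots, or NULL_INDEX if no such run exists.
    size_t find_first_contiguous(const std::vector<bool>& is_free, size_t num) {
      if (num == 0) return NULL_INDEX;           // nothing to find
      size_t run_start = 0;
      size_t run_len = 0;
      for (size_t i = 0; i < is_free.size(); ++i) {
        if (is_free[i]) {
          if (run_len == 0) run_start = i;
          if (++run_len == num) return run_start;
        } else {
          run_len = 0;
        }
      }
      return NULL_INDEX;
    }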
@@ -653,15 +639,15 @@
 }
 
 HeapWord*
-G1CollectedHeap::humongous_obj_allocate_initialize_regions(int first,
+G1CollectedHeap::humongous_obj_allocate_initialize_regions(size_t first,
                                                            size_t num_regions,
                                                            size_t word_size) {
-  assert(first != -1, "pre-condition");
+  assert(first != G1_NULL_HRS_INDEX, "pre-condition");
   assert(isHumongous(word_size), "word_size should be humongous");
   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 
   // Index of last region in the series + 1.
-  int last = first + (int) num_regions;
+  size_t last = first + num_regions;
 
   // We need to initialize the region(s) we just discovered. This is
   // a bit tricky given that it can happen concurrently with
@@ -676,7 +662,7 @@
   assert(word_size <= word_size_sum, "sanity");
 
   // This will be the "starts humongous" region.
-  HeapRegion* first_hr = _hrs->at(first);
+  HeapRegion* first_hr = region_at(first);
   // The header of the new object will be placed at the bottom of
   // the first region.
   HeapWord* new_obj = first_hr->bottom();
@@ -711,8 +697,8 @@
   // Then, if there are any, we will set up the "continues
   // humongous" regions.
   HeapRegion* hr = NULL;
-  for (int i = first + 1; i < last; ++i) {
-    hr = _hrs->at(i);
+  for (size_t i = first + 1; i < last; ++i) {
+    hr = region_at(i);
     hr->set_continuesHumongous(first_hr);
   }
   // If we have "continues humongous" regions (hr != NULL), then the
@@ -733,6 +719,17 @@
   assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
          "new_top should be in this region");
   first_hr->set_top(new_top);
+  if (_hr_printer.is_active()) {
+    HeapWord* bottom = first_hr->bottom();
+    HeapWord* end = first_hr->orig_end();
+    if ((first + 1) == last) {
+      // the series has a single humongous region
+      _hr_printer.alloc(G1HRPrinter::SingleHumongous, first_hr, new_top);
+    } else {
+      // the series has more than one humongous region
+      _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, end);
+    }
+  }
 
   // Now, we will update the top fields of the "continues humongous"
   // regions. The reason we need to do this is that, otherwise,
@@ -746,17 +743,19 @@
   // last one) is actually used when we will free up the humongous
   // region in free_humongous_region().
   hr = NULL;
-  for (int i = first + 1; i < last; ++i) {
-    hr = _hrs->at(i);
+  for (size_t i = first + 1; i < last; ++i) {
+    hr = region_at(i);
     if ((i + 1) == last) {
       // last continues humongous region
       assert(hr->bottom() < new_top && new_top <= hr->end(),
              "new_top should fall on this region");
       hr->set_top(new_top);
+      _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, new_top);
     } else {
       // not last one
       assert(new_top > hr->end(), "new_top should be above this region");
       hr->set_top(hr->end());
+      _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
     }
   }
   // If we have continues humongous regions (hr != NULL), then the
@@ -783,9 +782,9 @@
   size_t num_regions =
          round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
   size_t x_size = expansion_regions();
-  size_t fs = _hrs->free_suffix();
-  int first = humongous_obj_allocate_find_first(num_regions, word_size);
-  if (first == -1) {
+  size_t fs = _hrs.free_suffix();
+  size_t first = humongous_obj_allocate_find_first(num_regions, word_size);
+  if (first == G1_NULL_HRS_INDEX) {
     // The only thing we can do now is attempt expansion.
     if (fs + x_size >= num_regions) {
       // If the number of regions we're trying to allocate for this
@@ -798,20 +797,30 @@
       // room available.
       assert(num_regions > fs, "earlier allocation should have succeeded");
 
+      ergo_verbose1(ErgoHeapSizing,
+                    "attempt heap expansion",
+                    ergo_format_reason("humongous allocation request failed")
+                    ergo_format_byte("allocation request"),
+                    word_size * HeapWordSize);
       if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
+        // Even though the heap was expanded, it might not have
+        // reached the desired size. So, we cannot assume that the
+        // allocation will succeed.
         first = humongous_obj_allocate_find_first(num_regions, word_size);
-        // If the expansion was successful then the allocation
-        // should have been successful.
-        assert(first != -1, "this should have worked");
       }
     }
   }
 
   HeapWord* result = NULL;
-  if (first != -1) {
+  if (first != G1_NULL_HRS_INDEX) {
     result =
       humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
     assert(result != NULL, "it should always return a valid result");
+
+    // A successful humongous object allocation changes the used space
+    // information of the old generation so we need to recalculate the
+    // sizes and update the jstat counters here.
+    g1mm()->update_sizes();
   }
 
   verify_region_sets_optional();
@@ -829,12 +838,8 @@
 
 HeapWord*
 G1CollectedHeap::mem_allocate(size_t word_size,
-                              bool   is_noref,
-                              bool   is_tlab,
                               bool*  gc_overhead_limit_was_exceeded) {
   assert_heap_not_locked_and_not_at_safepoint();
-  assert(!is_tlab, "mem_allocate() this should not be called directly "
-         "to allocate TLABs");
 
   // Loop until the allocation is satisfied, or unsatisfied after GC.
   for (int try_count = 1; /* we'll return */; try_count += 1) {
@@ -918,6 +923,8 @@
 
       if (GC_locker::is_active_and_needs_gc()) {
         if (g1_policy()->can_expand_young_list()) {
+          // No need for an ergo verbose message here,
+          // can_expand_young_list() does this when it returns true.
           result = _mutator_alloc_region.attempt_allocation_force(word_size,
                                                       false /* bot_updates */);
           if (result != NULL) {
@@ -1083,12 +1090,6 @@
   ShouldNotReachHere();
 }
 
-void G1CollectedHeap::abandon_gc_alloc_regions() {
-  // first, make sure that the GC alloc region list is empty (it should!)
-  assert(_gc_alloc_region_list == NULL, "invariant");
-  release_gc_alloc_regions(true /* totally */);
-}
-
 class PostMCRemSetClearClosure: public HeapRegionClosure {
   ModRefBarrierSet* _mr_bs;
 public:
@@ -1158,6 +1159,35 @@
   }
 };
 
+class PostCompactionPrinterClosure: public HeapRegionClosure {
+private:
+  G1HRPrinter* _hr_printer;
+public:
+  bool doHeapRegion(HeapRegion* hr) {
+    assert(!hr->is_young(), "not expecting to find young regions");
+    // We only generate output for non-empty regions.
+    if (!hr->is_empty()) {
+      if (!hr->isHumongous()) {
+        _hr_printer->post_compaction(hr, G1HRPrinter::Old);
+      } else if (hr->startsHumongous()) {
+        if (hr->capacity() == (size_t) HeapRegion::GrainBytes) {
+          // single humongous region
+          _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
+        } else {
+          _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
+        }
+      } else {
+        assert(hr->continuesHumongous(), "only way to get here");
+        _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
+      }
+    }
+    return false;
+  }
+
+  PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
+    : _hr_printer(hr_printer) { }
+};
+
 bool G1CollectedHeap::do_collection(bool explicit_gc,
                                     bool clear_all_soft_refs,
                                     size_t word_size) {
@@ -1211,20 +1241,20 @@
       HandleMark hm;  // Discard invalid handles created during verification
       gclog_or_tty->print(" VerifyBeforeGC:");
       prepare_for_verify();
-      Universe::verify(true);
+      Universe::verify(/* allow dirty */ true,
+                       /* silent      */ false,
+                       /* option      */ VerifyOption_G1UsePrevMarking);
+
     }
+    pre_full_gc_dump();
 
     COMPILER2_PRESENT(DerivedPointerTable::clear());
 
-    // We want to discover references, but not process them yet.
-    // This mode is disabled in
-    // instanceRefKlass::process_discovered_references if the
-    // generation does some collection work, or
-    // instanceRefKlass::enqueue_discovered_references if the
-    // generation returns without doing any work.
-    ref_processor()->disable_discovery();
-    ref_processor()->abandon_partial_discovery();
-    ref_processor()->verify_no_references_recorded();
+    // Disable discovery and empty the discovered lists
+    // for the CM ref processor.
+    ref_processor_cm()->disable_discovery();
+    ref_processor_cm()->abandon_partial_discovery();
+    ref_processor_cm()->verify_no_references_recorded();
 
     // Abandon current iterations of concurrent marking and concurrent
     // refinement, if any are in progress.
@@ -1236,6 +1266,11 @@
     g1_rem_set()->cleanupHRRS();
     tear_down_region_lists();
 
+    // We should call this after we retire any currently active alloc
+    // regions so that all the ALLOC / RETIRE events are generated
+    // before the start GC event.
+    _hr_printer.start_gc(true /* full */, (size_t) total_collections());
+
     // We may have added regions to the current incremental collection
     // set between the last GC or pause and now. We need to clear the
     // incremental collection set and then start rebuilding it afresh
@@ -1244,37 +1279,36 @@
     g1_policy()->clear_incremental_cset();
     g1_policy()->stop_incremental_cset_building();
 
-    if (g1_policy()->in_young_gc_mode()) {
-      empty_young_list();
-      g1_policy()->set_full_young_gcs(true);
-    }
-
-    // See the comment in G1CollectedHeap::ref_processing_init() about
+    empty_young_list();
+    g1_policy()->set_full_young_gcs(true);
+
+    // See the comments in g1CollectedHeap.hpp and
+    // G1CollectedHeap::ref_processing_init() about
     // how reference processing currently works in G1.
 
-    // Temporarily make reference _discovery_ single threaded (non-MT).
-    ReferenceProcessorMTDiscoveryMutator rp_disc_ser(ref_processor(), false);
-
-    // Temporarily make refs discovery atomic
-    ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true);
-
-    // Temporarily clear _is_alive_non_header
-    ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);
-
-    ref_processor()->enable_discovery();
-    ref_processor()->setup_policy(do_clear_all_soft_refs);
+    // Temporarily make discovery by the STW ref processor single threaded (non-MT).
+    ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
+
+    // Temporarily clear the STW ref processor's _is_alive_non_header field.
+    ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
+
+    ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
+    ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
 
     // Do collection work
     {
       HandleMark hm;  // Discard invalid handles created during gc
-      G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs);
+      G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
     }
+
     assert(free_regions() == 0, "we should not have added any free regions");
     rebuild_region_lists();
 
     _summary_bytes_used = recalculate_used();
 
-    ref_processor()->enqueue_discovered_references();
+    // Enqueue any discovered reference objects that have
+    // not been removed from the discovered lists.
+    ref_processor_stw()->enqueue_discovered_references();
 
     COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 
@@ -1284,9 +1318,21 @@
       HandleMark hm;  // Discard invalid handles created during verification
       gclog_or_tty->print(" VerifyAfterGC:");
       prepare_for_verify();
-      Universe::verify(false);
+      Universe::verify(/* allow dirty */ false,
+                       /* silent      */ false,
+                       /* option      */ VerifyOption_G1UsePrevMarking);
+
     }
-    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
+
+    assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
+    ref_processor_stw()->verify_no_references_recorded();
+
+    // Note: since we've just done a full GC, concurrent
+    // marking is no longer active. Therefore we need not
+    // re-enable reference discovery for the CM ref processor.
+    // That will be done at the start of the next marking cycle.
+    assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
+    ref_processor_cm()->verify_no_references_recorded();
 
     reset_gc_time_stamp();
     // Since everything potentially moved, we will clear all remembered
@@ -1298,6 +1344,17 @@
     // Resize the heap if necessary.
     resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
 
+    if (_hr_printer.is_active()) {
+      // We should do this after we potentially resize the heap so
+      // that all the COMMIT / UNCOMMIT events are generated before
+      // the end GC event.
+
+      PostCompactionPrinterClosure cl(hr_printer());
+      heap_region_iterate(&cl);
+
+      _hr_printer.end_gc(true /* full */, (size_t) total_collections());
+    }
+
     if (_cg1r->use_cache()) {
       _cg1r->clear_and_record_card_counts();
       _cg1r->clear_hot_cache();
@@ -1355,23 +1412,23 @@
            || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
   }
 
-  if (g1_policy()->in_young_gc_mode()) {
-    _young_list->reset_sampled_info();
-    // At this point there should be no regions in the
-    // entire heap tagged as young.
-    assert( check_young_list_empty(true /* check_heap */),
-            "young list should be empty at this point");
-  }
+  _young_list->reset_sampled_info();
+  // At this point there should be no regions in the
+  // entire heap tagged as young.
+  assert( check_young_list_empty(true /* check_heap */),
+    "young list should be empty at this point");
 
   // Update the number of full collections that have been completed.
   increment_full_collections_completed(false /* concurrent */);
 
+  _hrs.verify_optional();
   verify_region_sets_optional();
 
   if (PrintHeapAtGC) {
     Universe::print_heap_after_gc();
   }
-  g1mm()->update_counters();
+  g1mm()->update_sizes();
+  post_full_gc_dump();
 
   return true;
 }
@@ -1446,63 +1503,34 @@
   // we'll try to make the capacity smaller than it, not greater).
   maximum_desired_capacity =  MAX2(maximum_desired_capacity, min_heap_size);
 
-  if (PrintGC && Verbose) {
-    const double free_percentage =
-      (double) free_after_gc / (double) capacity_after_gc;
-    gclog_or_tty->print_cr("Computing new size after full GC ");
-    gclog_or_tty->print_cr("  "
-                           "  minimum_free_percentage: %6.2f",
-                           minimum_free_percentage);
-    gclog_or_tty->print_cr("  "
-                           "  maximum_free_percentage: %6.2f",
-                           maximum_free_percentage);
-    gclog_or_tty->print_cr("  "
-                           "  capacity: %6.1fK"
-                           "  minimum_desired_capacity: %6.1fK"
-                           "  maximum_desired_capacity: %6.1fK",
-                           (double) capacity_after_gc / (double) K,
-                           (double) minimum_desired_capacity / (double) K,
-                           (double) maximum_desired_capacity / (double) K);
-    gclog_or_tty->print_cr("  "
-                           "  free_after_gc: %6.1fK"
-                           "  used_after_gc: %6.1fK",
-                           (double) free_after_gc / (double) K,
-                           (double) used_after_gc / (double) K);
-    gclog_or_tty->print_cr("  "
-                           "   free_percentage: %6.2f",
-                           free_percentage);
-  }
   if (capacity_after_gc < minimum_desired_capacity) {
     // Don't expand unless it's significant
     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
-    if (expand(expand_bytes)) {
-      if (PrintGC && Verbose) {
-        gclog_or_tty->print_cr("  "
-                               "  expanding:"
-                               "  max_heap_size: %6.1fK"
-                               "  minimum_desired_capacity: %6.1fK"
-                               "  expand_bytes: %6.1fK",
-                               (double) max_heap_size / (double) K,
-                               (double) minimum_desired_capacity / (double) K,
-                               (double) expand_bytes / (double) K);
-      }
-    }
+    ergo_verbose4(ErgoHeapSizing,
+                  "attempt heap expansion",
+                  ergo_format_reason("capacity lower than "
+                                     "min desired capacity after Full GC")
+                  ergo_format_byte("capacity")
+                  ergo_format_byte("occupancy")
+                  ergo_format_byte_perc("min desired capacity"),
+                  capacity_after_gc, used_after_gc,
+                  minimum_desired_capacity, (double) MinHeapFreeRatio);
+    expand(expand_bytes);
 
     // No expansion, now see if we want to shrink
   } else if (capacity_after_gc > maximum_desired_capacity) {
     // Capacity too large, compute shrinking size
     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
+    ergo_verbose4(ErgoHeapSizing,
+                  "attempt heap shrinking",
+                  ergo_format_reason("capacity higher than "
+                                     "max desired capacity after Full GC")
+                  ergo_format_byte("capacity")
+                  ergo_format_byte("occupancy")
+                  ergo_format_byte_perc("max desired capacity"),
+                  capacity_after_gc, used_after_gc,
+                  maximum_desired_capacity, (double) MaxHeapFreeRatio);
     shrink(shrink_bytes);
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr("  "
-                             "  shrinking:"
-                             "  min_heap_size: %6.1fK"
-                             "  maximum_desired_capacity: %6.1fK"
-                             "  shrink_bytes: %6.1fK",
-                             (double) min_heap_size / (double) K,
-                             (double) maximum_desired_capacity / (double) K,
-                             (double) shrink_bytes / (double) K);
-    }
   }
 }
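
The block above replaces the PrintGC/Verbose output with ergo_verbose records of the expand-or-shrink decision taken after a Full GC. A minimal sketch of that decision shape, assuming simple fractional free-ratio bounds in place of the VM's actual MinHeapFreeRatio/MaxHeapFreeRatio handling:

    #include <cstddef>
    #include <cstdio>

    // Derive the desired capacity window from the free-ratio bounds and
    // report whether the heap should grow, shrink, or stay as it is.
    void resize_decision(size_t capacity, size_t used,
                         double min_free_ratio, double max_free_ratio) {
      size_t min_desired = (size_t)(used / (1.0 - min_free_ratio));
      size_t max_desired = (size_t)(used / (1.0 - max_free_ratio));
      if (capacity < min_desired) {
        std::printf("attempt heap expansion by %zu bytes\n", min_desired - capacity);
      } else if (capacity > max_desired) {
        std::printf("attempt heap shrinking by %zu bytes\n", capacity - max_desired);
      } else {
        std::printf("keep current capacity\n");
      }
    }

    int main() {
      resize_decision(512u << 20, 480u << 20, 0.20, 0.70);   // nearly full -> expand
      resize_decision(512u << 20,  64u << 20, 0.20, 0.70);   // mostly empty -> shrink
      return 0;
    }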
 
@@ -1588,7 +1616,13 @@
   verify_region_sets_optional();
 
   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
+  ergo_verbose1(ErgoHeapSizing,
+                "attempt heap expansion",
+                ergo_format_reason("allocation request failed")
+                ergo_format_byte("allocation request"),
+                word_size * HeapWordSize);
   if (expand(expand_bytes)) {
+    _hrs.verify_optional();
     verify_region_sets_optional();
     return attempt_allocation_at_safepoint(word_size,
                                  false /* expect_null_mutator_alloc_region */);
@@ -1596,59 +1630,76 @@
   return NULL;
 }
 
+void G1CollectedHeap::update_committed_space(HeapWord* old_end,
+                                             HeapWord* new_end) {
+  assert(old_end != new_end, "don't call this otherwise");
+  assert((HeapWord*) _g1_storage.high() == new_end, "invariant");
+
+  // Update the committed mem region.
+  _g1_committed.set_end(new_end);
+  // Tell the card table about the update.
+  Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
+  // Tell the BOT about the update.
+  _bot_shared->resize(_g1_committed.word_size());
+}
+
 bool G1CollectedHeap::expand(size_t expand_bytes) {
   size_t old_mem_size = _g1_storage.committed_size();
   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
                                        HeapRegion::GrainBytes);
-
-  if (Verbose && PrintGC) {
-    gclog_or_tty->print("Expanding garbage-first heap from %ldK by %ldK",
-                           old_mem_size/K, aligned_expand_bytes/K);
-  }
-
-  HeapWord* old_end = (HeapWord*)_g1_storage.high();
+  ergo_verbose2(ErgoHeapSizing,
+                "expand the heap",
+                ergo_format_byte("requested expansion amount")
+                ergo_format_byte("attempted expansion amount"),
+                expand_bytes, aligned_expand_bytes);
+
+  // First commit the memory.
+  HeapWord* old_end = (HeapWord*) _g1_storage.high();
   bool successful = _g1_storage.expand_by(aligned_expand_bytes);
   if (successful) {
-    HeapWord* new_end = (HeapWord*)_g1_storage.high();
-
-    // Expand the committed region.
-    _g1_committed.set_end(new_end);
-
-    // Tell the cardtable about the expansion.
-    Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
-
-    // And the offset table as well.
-    _bot_shared->resize(_g1_committed.word_size());
-
-    expand_bytes = aligned_expand_bytes;
-    HeapWord* base = old_end;
-
-    // Create the heap regions for [old_end, new_end)
-    while (expand_bytes > 0) {
-      HeapWord* high = base + HeapRegion::GrainWords;
-
-      // Create a new HeapRegion.
-      MemRegion mr(base, high);
-      bool is_zeroed = !_g1_max_committed.contains(base);
-      HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed);
-
-      // Add it to the HeapRegionSeq.
-      _hrs->insert(hr);
-      _free_list.add_as_tail(hr);
-
-      // And we used up an expansion region to create it.
-      _expansion_regions--;
-
-      expand_bytes -= HeapRegion::GrainBytes;
-      base += HeapRegion::GrainWords;
+    // Then propagate this update to the necessary data structures.
+    HeapWord* new_end = (HeapWord*) _g1_storage.high();
+    update_committed_space(old_end, new_end);
+
+    FreeRegionList expansion_list("Local Expansion List");
+    MemRegion mr = _hrs.expand_by(old_end, new_end, &expansion_list);
+    assert(mr.start() == old_end, "post-condition");
+    // mr might be a smaller region than what was requested if
+    // expand_by() was unable to allocate the HeapRegion instances
+    assert(mr.end() <= new_end, "post-condition");
+
+    size_t actual_expand_bytes = mr.byte_size();
+    assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
+    assert(actual_expand_bytes == expansion_list.total_capacity_bytes(),
+           "post-condition");
+    if (actual_expand_bytes < aligned_expand_bytes) {
+      // We could not expand _hrs to the desired size. In this case we
+      // need to shrink the committed space accordingly.
+      assert(mr.end() < new_end, "invariant");
+
+      size_t diff_bytes = aligned_expand_bytes - actual_expand_bytes;
+      // First uncommit the memory.
+      _g1_storage.shrink_by(diff_bytes);
+      // Then propagate this update to the necessary data structures.
+      update_committed_space(new_end, mr.end());
     }
-    assert(base == new_end, "sanity");
-
-    // Now update max_committed if necessary.
-    _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), new_end));
-
+    _free_list.add_as_tail(&expansion_list);
+
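+    // Report each newly committed GrainWords-sized chunk to the heap
+    // region printer.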
+    if (_hr_printer.is_active()) {
+      HeapWord* curr = mr.start();
+      while (curr < mr.end()) {
+        HeapWord* curr_end = curr + HeapRegion::GrainWords;
+        _hr_printer.commit(curr, curr_end);
+        curr = curr_end;
+      }
+      assert(curr == mr.end(), "post-condition");
+    }
+    g1_policy()->record_new_heap_size(n_regions());
   } else {
+    ergo_verbose0(ErgoHeapSizing,
+                  "did not expand the heap",
+                  ergo_format_reason("heap expansion operation failed"));
     // The expansion of the virtual storage space was unsuccessful.
     // Let's see if it was because we ran out of swap.
     if (G1ExitOnExpansionFailure &&
@@ -1657,54 +1708,60 @@
       vm_exit_out_of_memory(aligned_expand_bytes, "G1 heap expansion");
     }
   }
-
-  if (Verbose && PrintGC) {
-    size_t new_mem_size = _g1_storage.committed_size();
-    gclog_or_tty->print_cr("...%s, expanded to %ldK",
-                           (successful ? "Successful" : "Failed"),
-                           new_mem_size/K);
-  }
   return successful;
 }
 
-void G1CollectedHeap::shrink_helper(size_t shrink_bytes)
-{
+void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
   size_t old_mem_size = _g1_storage.committed_size();
   size_t aligned_shrink_bytes =
     ReservedSpace::page_align_size_down(shrink_bytes);
   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
                                          HeapRegion::GrainBytes);
   size_t num_regions_deleted = 0;
-  MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted);
-
-  assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
-  if (mr.byte_size() > 0)
+  MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted);
+  HeapWord* old_end = (HeapWord*) _g1_storage.high();
+  assert(mr.end() == old_end, "post-condition");
+
+  ergo_verbose3(ErgoHeapSizing,
+                "shrink the heap",
+                ergo_format_byte("requested shrinking amount")
+                ergo_format_byte("aligned shrinking amount")
+                ergo_format_byte("attempted shrinking amount"),
+                shrink_bytes, aligned_shrink_bytes, mr.byte_size());
+  if (mr.byte_size() > 0) {
+    if (_hr_printer.is_active()) {
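+      // Walk the shrunk range backwards in region-sized steps and report
+      // each chunk that is about to be uncommitted.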
+      HeapWord* curr = mr.end();
+      while (curr > mr.start()) {
+        HeapWord* curr_end = curr;
+        curr -= HeapRegion::GrainWords;
+        _hr_printer.uncommit(curr, curr_end);
+      }
+      assert(curr == mr.start(), "post-condition");
+    }
+
     _g1_storage.shrink_by(mr.byte_size());
-  assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
-
-  _g1_committed.set_end(mr.start());
-  _expansion_regions += num_regions_deleted;
-
-  // Tell the cardtable about it.
-  Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
-
-  // And the offset table as well.
-  _bot_shared->resize(_g1_committed.word_size());
-
-  HeapRegionRemSet::shrink_heap(n_regions());
-
-  if (Verbose && PrintGC) {
-    size_t new_mem_size = _g1_storage.committed_size();
-    gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK",
-                           old_mem_size/K, aligned_shrink_bytes/K,
-                           new_mem_size/K);
+    HeapWord* new_end = (HeapWord*) _g1_storage.high();
+    assert(mr.start() == new_end, "post-condition");
+
+    _expansion_regions += num_regions_deleted;
+    update_committed_space(old_end, new_end);
+    HeapRegionRemSet::shrink_heap(n_regions());
+    g1_policy()->record_new_heap_size(n_regions());
+  } else {
+    ergo_verbose0(ErgoHeapSizing,
+                  "did not shrink the heap",
+                  ergo_format_reason("heap shrinking operation failed"));
   }
 }
 
 void G1CollectedHeap::shrink(size_t shrink_bytes) {
   verify_region_sets_optional();
 
-  release_gc_alloc_regions(true /* totally */);
+  // We should only reach here at the end of a Full GC which means we
+  // should not be holding on to any GC alloc regions. The method
+  // below will make sure of that and do any remaining cleanup.
+  abandon_gc_alloc_regions();
+
   // Instead of tearing down / rebuilding the free lists here, we
   // could instead use the remove_all_pending() method on free_list to
   // remove only the ones that we need to remove.
@@ -1712,6 +1769,7 @@
   shrink_helper(shrink_bytes);
   rebuild_region_lists();
 
+  _hrs.verify_optional();
   verify_region_sets_optional();
 }
 
@@ -1727,14 +1785,17 @@
   _g1_policy(policy_),
   _dirty_card_queue_set(false),
   _into_cset_dirty_card_queue_set(false),
-  _is_alive_closure(this),
-  _ref_processor(NULL),
+  _is_alive_closure_cm(this),
+  _is_alive_closure_stw(this),
+  _ref_processor_cm(NULL),
+  _ref_processor_stw(NULL),
   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
   _bot_shared(NULL),
   _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
   _evac_failure_scan_stack(NULL) ,
   _mark_in_progress(false),
   _cg1r(NULL), _summary_bytes_used(0),
+  _g1mm(NULL),
   _refine_cte_cl(NULL),
   _full_collection(false),
   _free_list("Master Free List"),
@@ -1743,6 +1804,7 @@
   _free_regions_coming(false),
   _young_list(new YoungList(this)),
   _gc_time_stamp(0),
+  _retained_old_gc_alloc_region(NULL),
   _surviving_young_words(NULL),
   _full_collections_completed(0),
   _in_cset_fast_test(NULL),
@@ -1774,20 +1836,6 @@
     _task_queues->register_queue(i, q);
   }
 
-  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
-    _gc_alloc_regions[ap]          = NULL;
-    _gc_alloc_region_counts[ap]    = 0;
-    _retained_gc_alloc_regions[ap] = NULL;
-    // by default, we do not retain a GC alloc region for each ap;
-    // we'll override this, when appropriate, below
-    _retain_gc_alloc_region[ap]    = false;
-  }
-
-  // We will try to remember the last half-full tenured region we
-  // allocated to at the end of a collection so that we can re-use it
-  // during the next collection.
-  _retain_gc_alloc_region[GCAllocForTenured]  = true;
-
   guarantee(_task_queues != NULL, "task_queues allocation failure.");
 }
 
@@ -1799,6 +1847,10 @@
 
   MutexLocker x(Heap_lock);
 
+  // We have to initialize the printer before committing the heap, as
+  // it will be used then.
+  _hr_printer.set_active(G1PrintHeapRegions);
+
   // While there are no constraints in the GC code that HeapWordSize
   // be any particular value, there are multiple other areas in the
   // system which believe this to be true (e.g. oop->object_size in some
@@ -1819,12 +1871,27 @@
   PermanentGenerationSpec* pgs = collector_policy()->permanent_generation();
   // Includes the perm-gen.
 
-  const size_t total_reserved = max_byte_size + pgs->max_size();
+  // When compressed oops are enabled, the preferred heap base
+  // is calculated by subtracting the requested size from the
+  // 32Gb boundary and using the result as the base address for
+  // heap reservation. If the requested size is not aligned to
+  // HeapRegion::GrainBytes (i.e. the alignment that is passed
+  // into the ReservedHeapSpace constructor) then the actual
+  // base of the reserved heap may end up differing from the
+  // address that was requested (i.e. the preferred heap base).
+  // If this happens then we could end up using a non-optimal
+  // compressed oops mode.
+
+  // Since max_byte_size is aligned to the size of a heap region (checked
+  // above), we also need to align the perm gen size as it might not be.
+  const size_t total_reserved = max_byte_size +
+                                align_size_up(pgs->max_size(), HeapRegion::GrainBytes);
+  Universe::check_alignment(total_reserved, HeapRegion::GrainBytes, "g1 heap and perm");
+
   char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
 
-  ReservedSpace heap_rs(max_byte_size + pgs->max_size(),
-                        HeapRegion::GrainBytes,
-                        UseLargePages, addr);
+  ReservedHeapSpace heap_rs(total_reserved, HeapRegion::GrainBytes,
+                            UseLargePages, addr);
 
   if (UseCompressedOops) {
     if (addr != NULL && !heap_rs.is_reserved()) {
@@ -1832,14 +1899,17 @@
       // region is taken already, for example, by 'java' launcher.
       // Try again to reserve heap higher.
       addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
-      ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes,
-                             UseLargePages, addr);
+
+      ReservedHeapSpace heap_rs0(total_reserved, HeapRegion::GrainBytes,
+                                 UseLargePages, addr);
+
       if (addr != NULL && !heap_rs0.is_reserved()) {
         // Failed to reserve at specified address again - give up.
         addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
         assert(addr == NULL, "");
-        ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes,
-                               UseLargePages, addr);
+
+        ReservedHeapSpace heap_rs1(total_reserved, HeapRegion::GrainBytes,
+                                   UseLargePages, addr);
         heap_rs = heap_rs1;
       } else {
         heap_rs = heap_rs0;
@@ -1890,9 +1960,9 @@
 
   _g1_storage.initialize(g1_rs, 0);
   _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
-  _g1_max_committed = _g1_committed;
-  _hrs = new HeapRegionSeq(_expansion_regions);
-  guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq");
+  _hrs.initialize((HeapWord*) _g1_reserved.start(),
+                  (HeapWord*) _g1_reserved.end(),
+                  _expansion_regions);
 
   // 6843694 - ensure that the maximum region index can fit
   // in the remembered set structures.
@@ -1983,16 +2053,15 @@
   // counts and that mechanism.
   SpecializationStats::clear();
 
-  _gc_alloc_region_list = NULL;
-
   // Do later initialization work for concurrent refinement.
   _cg1r->init();
 
   // Here we allocate the dummy full region that is required by the
   // G1AllocRegion class. If we don't pass an address in the reserved
   // space here, lots of asserts fire.
-  MemRegion mr(_g1_reserved.start(), HeapRegion::GrainWords);
-  HeapRegion* dummy_region = new HeapRegion(_bot_shared, mr, true);
+
+  HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
+                                             _g1_reserved.start());
   // We'll re-use the same region whether the alloc region will
   // require BOT updates or not and, if it doesn't, then a non-young
   // region will complain that it cannot support allocations without
@@ -2006,7 +2075,7 @@
 
   // Do create of the monitoring and management support so that
   // values in the heap have been properly initialized.
-  _g1mm = new G1MonitoringSupport(this, &_g1_storage);
+  _g1mm = new G1MonitoringSupport(this);
 
   return JNI_OK;
 }
@@ -2014,34 +2083,81 @@
 void G1CollectedHeap::ref_processing_init() {
   // Reference processing in G1 currently works as follows:
   //
-  // * There is only one reference processor instance that
-  //   'spans' the entire heap. It is created by the code
-  //   below.
-  // * Reference discovery is not enabled during an incremental
-  //   pause (see 6484982).
-  // * Discoverered refs are not enqueued nor are they processed
-  //   during an incremental pause (see 6484982).
-  // * Reference discovery is enabled at initial marking.
-  // * Reference discovery is disabled and the discovered
-  //   references processed etc during remarking.
-  // * Reference discovery is MT (see below).
-  // * Reference discovery requires a barrier (see below).
-  // * Reference processing is currently not MT (see 6608385).
-  // * A full GC enables (non-MT) reference discovery and
-  //   processes any discovered references.
+  // * There are two reference processor instances. One is
+  //   used to record and process discovered references
+  //   during concurrent marking; the other is used to
+  //   record and process references during STW pauses
+  //   (both full and incremental).
+  // * Both ref processors need to 'span' the entire heap as
+  //   the regions in the collection set may be dotted around.
+  //
+  // * For the concurrent marking ref processor:
+  //   * Reference discovery is enabled at initial marking.
+  //   * Reference discovery is disabled and the discovered
+  //     references processed etc during remarking.
+  //   * Reference discovery is MT (see below).
+  //   * Reference discovery requires a barrier (see below).
+  //   * Reference processing may or may not be MT
+  //     (depending on the value of ParallelRefProcEnabled
+  //     and ParallelGCThreads).
+  //   * A full GC disables reference discovery by the CM
+  //     ref processor and abandons any entries on its
+  //     discovered lists.
+  //
+  // * For the STW processor:
+  //   * Non MT discovery is enabled at the start of a full GC.
+  //   * Processing and enqueueing during a full GC is non-MT.
+  //   * During a full GC, references are processed after marking.
+  //
+  //   * Discovery (may or may not be MT) is enabled at the start
+  //     of an incremental evacuation pause.
+  //   * References are processed near the end of a STW evacuation pause.
+  //   * For both types of GC:
+  //     * Discovery is atomic - i.e. not concurrent.
+  //     * Reference discovery will not need a barrier.
 
   SharedHeap::ref_processing_init();
   MemRegion mr = reserved_region();
-  _ref_processor =
+
+  // Concurrent Mark ref processor
+  _ref_processor_cm =
     new ReferenceProcessor(mr,    // span
-                           ParallelRefProcEnabled && (ParallelGCThreads > 1),    // mt processing
-                           (int) ParallelGCThreads,   // degree of mt processing
-                           ParallelGCThreads > 1 || ConcGCThreads > 1,  // mt discovery
-                           (int) MAX2(ParallelGCThreads, ConcGCThreads), // degree of mt discovery
-                           false,                     // Reference discovery is not atomic
-                           &_is_alive_closure,        // is alive closure for efficiency
-                           true);                     // Setting next fields of discovered
-                                                      // lists requires a barrier.
+                           ParallelRefProcEnabled && (ParallelGCThreads > 1),
+                                // mt processing
+                           (int) ParallelGCThreads,
+                                // degree of mt processing
+                           (ParallelGCThreads > 1) || (ConcGCThreads > 1),
+                                // mt discovery
+                           (int) MAX2(ParallelGCThreads, ConcGCThreads),
+                                // degree of mt discovery
+                           false,
+                                // Reference discovery is not atomic
+                           &_is_alive_closure_cm,
+                                // is alive closure
+                                // (for efficiency/performance)
+                           true);
+                                // Setting next fields of discovered
+                                // lists requires a barrier.
+
+  // STW ref processor
+  _ref_processor_stw =
+    new ReferenceProcessor(mr,    // span
+                           ParallelRefProcEnabled && (ParallelGCThreads > 1),
+                                // mt processing
+                           MAX2((int)ParallelGCThreads, 1),
+                                // degree of mt processing
+                           (ParallelGCThreads > 1),
+                                // mt discovery
+                           MAX2((int)ParallelGCThreads, 1),
+                                // degree of mt discovery
+                           true,
+                                // Reference discovery is atomic
+                           &_is_alive_closure_stw,
+                                // is alive closure
+                                // (for efficiency/performance)
+                           false);
+                                // Setting next fields of discovered
+                                // lists does not require a barrier.
 }
 
 size_t G1CollectedHeap::capacity() const {
@@ -2100,31 +2216,10 @@
 
 size_t G1CollectedHeap::recalculate_used() const {
   SumUsedClosure blk;
-  _hrs->iterate(&blk);
+  heap_region_iterate(&blk);
   return blk.result();
 }
 
-#ifndef PRODUCT
-class SumUsedRegionsClosure: public HeapRegionClosure {
-  size_t _num;
-public:
-  SumUsedRegionsClosure() : _num(0) {}
-  bool doHeapRegion(HeapRegion* r) {
-    if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) {
-      _num += 1;
-    }
-    return false;
-  }
-  size_t result() { return _num; }
-};
-
-size_t G1CollectedHeap::recalculate_used_regions() const {
-  SumUsedRegionsClosure blk;
-  _hrs->iterate(&blk);
-  return blk.result();
-}
-#endif // PRODUCT
-
 size_t G1CollectedHeap::unsafe_max_alloc() {
   if (free_regions() > 0) return HeapRegion::GrainBytes;
   // otherwise, is there space in the current allocation region?
@@ -2285,8 +2380,8 @@
 }
 
 bool G1CollectedHeap::is_in(const void* p) const {
-  if (_g1_committed.contains(p)) {
-    HeapRegion* hr = _hrs->addr_to_region(p);
+  HeapRegion* hr = _hrs.addr_to_region((HeapWord*) p);
+  if (hr != NULL) {
     return hr->is_in(p);
   } else {
     return _perm_gen->as_gen()->is_in(p);
@@ -2314,7 +2409,7 @@
 
 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) {
   IterateOopClosureRegionClosure blk(_g1_committed, cl);
-  _hrs->iterate(&blk);
+  heap_region_iterate(&blk);
   if (do_perm) {
     perm_gen()->oop_iterate(cl);
   }
@@ -2322,7 +2417,7 @@
 
 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) {
   IterateOopClosureRegionClosure blk(mr, cl);
-  _hrs->iterate(&blk);
+  heap_region_iterate(&blk);
   if (do_perm) {
     perm_gen()->oop_iterate(cl);
   }
@@ -2344,7 +2439,7 @@
 
 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) {
   IterateObjectClosureRegionClosure blk(cl);
-  _hrs->iterate(&blk);
+  heap_region_iterate(&blk);
   if (do_perm) {
     perm_gen()->object_iterate(cl);
   }
@@ -2369,24 +2464,17 @@
 
 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
   SpaceClosureRegionClosure blk(cl);
-  _hrs->iterate(&blk);
-}
-
-void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) {
-  _hrs->iterate(cl);
+  heap_region_iterate(&blk);
+}
+
+void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
+  _hrs.iterate(cl);
 }
 
 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r,
-                                               HeapRegionClosure* cl) {
-  _hrs->iterate_from(r, cl);
-}
-
-void
-G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) {
-  _hrs->iterate_from(idx, cl);
-}
-
-HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); }
+                                               HeapRegionClosure* cl) const {
+  _hrs.iterate_from(r, cl);
+}
 
 void
 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
@@ -2568,7 +2656,7 @@
 }
 
 CompactibleSpace* G1CollectedHeap::first_compactible_space() {
-  return _hrs->length() > 0 ? _hrs->at(0) : NULL;
+  return n_regions() > 0 ? region_at(0) : NULL;
 }
 
 
@@ -2623,11 +2711,6 @@
   }
 }
 
-size_t G1CollectedHeap::large_typearray_limit() {
-  // FIXME
-  return HeapRegion::GrainBytes/HeapWordSize;
-}
-
 size_t G1CollectedHeap::max_capacity() const {
   return _g1_reserved.byte_size();
 }
@@ -2645,17 +2728,18 @@
 }
 
 class VerifyLivenessOopClosure: public OopClosure {
-  G1CollectedHeap* g1h;
+  G1CollectedHeap* _g1h;
+  VerifyOption _vo;
 public:
-  VerifyLivenessOopClosure(G1CollectedHeap* _g1h) {
-    g1h = _g1h;
-  }
+  VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):
+    _g1h(g1h), _vo(vo)
+  { }
   void do_oop(narrowOop *p) { do_oop_work(p); }
   void do_oop(      oop *p) { do_oop_work(p); }
 
   template <class T> void do_oop_work(T *p) {
     oop obj = oopDesc::load_decode_heap_oop(p);
-    guarantee(obj == NULL || !g1h->is_obj_dead(obj),
+    guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
               "Dead object referenced by a not dead object");
   }
 };
@@ -2665,18 +2749,30 @@
   G1CollectedHeap* _g1h;
   size_t _live_bytes;
   HeapRegion *_hr;
-  bool _use_prev_marking;
+  VerifyOption _vo;
 public:
-  // use_prev_marking == true  -> use "prev" marking information,
-  // use_prev_marking == false -> use "next" marking information
-  VerifyObjsInRegionClosure(HeapRegion *hr, bool use_prev_marking)
-    : _live_bytes(0), _hr(hr), _use_prev_marking(use_prev_marking) {
+  // _vo == UsePrevMarking -> use "prev" marking information,
+  // _vo == UseNextMarking -> use "next" marking information,
+  // _vo == UseMarkWord    -> use mark word from object header.
+  VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo)
+    : _live_bytes(0), _hr(hr), _vo(vo) {
     _g1h = G1CollectedHeap::heap();
   }
   void do_object(oop o) {
-    VerifyLivenessOopClosure isLive(_g1h);
+    VerifyLivenessOopClosure isLive(_g1h, _vo);
     assert(o != NULL, "Huh?");
-    if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) {
+    if (!_g1h->is_obj_dead_cond(o, _vo)) {
+      // If the object is alive according to the mark word,
+      // then verify that the marking information agrees.
+      // Note we can't verify the contra-positive of the
+      // above: if the object is dead (according to the mark
+      // word), it may not be marked, or may have been marked
+      // but has since became dead, or may have been allocated
+      // since the last marking.
+      if (_vo == VerifyOption_G1UseMarkWord) {
+        guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch");
+      }
+
       o->oop_iterate(&isLive);
       if (!_hr->obj_allocated_since_prev_marking(o)) {
         size_t obj_size = o->size();    // Make sure we don't overflow
@@ -2718,17 +2814,18 @@
 
 class VerifyRegionClosure: public HeapRegionClosure {
 private:
-  bool _allow_dirty;
-  bool _par;
-  bool _use_prev_marking;
-  bool _failures;
+  bool         _allow_dirty;
+  bool         _par;
+  VerifyOption _vo;
+  bool         _failures;
 public:
-  // use_prev_marking == true  -> use "prev" marking information,
-  // use_prev_marking == false -> use "next" marking information
-  VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking)
+  // _vo == UsePrevMarking -> use "prev" marking information,
+  // _vo == UseNextMarking -> use "next" marking information,
+  // _vo == UseMarkWord    -> use mark word from object header.
+  VerifyRegionClosure(bool allow_dirty, bool par, VerifyOption vo)
     : _allow_dirty(allow_dirty),
       _par(par),
-      _use_prev_marking(use_prev_marking),
+      _vo(vo),
       _failures(false) {}
 
   bool failures() {
@@ -2740,11 +2837,11 @@
               "Should be unclaimed at verify points.");
     if (!r->continuesHumongous()) {
       bool failures = false;
-      r->verify(_allow_dirty, _use_prev_marking, &failures);
+      r->verify(_allow_dirty, _vo, &failures);
       if (failures) {
         _failures = true;
       } else {
-        VerifyObjsInRegionClosure not_dead_yet_cl(r, _use_prev_marking);
+        VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
         r->object_iterate(&not_dead_yet_cl);
         if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
           gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] "
@@ -2764,14 +2861,15 @@
 class VerifyRootsClosure: public OopsInGenClosure {
 private:
   G1CollectedHeap* _g1h;
-  bool             _use_prev_marking;
+  VerifyOption     _vo;
   bool             _failures;
 public:
-  // use_prev_marking == true  -> use "prev" marking information,
-  // use_prev_marking == false -> use "next" marking information
-  VerifyRootsClosure(bool use_prev_marking) :
+  // _vo == UsePrevMarking -> use "prev" marking information,
+  // _vo == UseNextMarking -> use "next" marking information,
+  // _vo == UseMarkWord    -> use mark word from object header.
+  VerifyRootsClosure(VerifyOption vo) :
     _g1h(G1CollectedHeap::heap()),
-    _use_prev_marking(use_prev_marking),
+    _vo(vo),
     _failures(false) { }
 
   bool failures() { return _failures; }
@@ -2780,9 +2878,12 @@
     T heap_oop = oopDesc::load_heap_oop(p);
     if (!oopDesc::is_null(heap_oop)) {
       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-      if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
+      if (_g1h->is_obj_dead_cond(obj, _vo)) {
         gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
                               "points to dead obj "PTR_FORMAT, p, (void*) obj);
+        if (_vo == VerifyOption_G1UseMarkWord) {
+          gclog_or_tty->print_cr("  Mark word: "PTR_FORMAT, (void*)(obj->mark()));
+        }
         obj->print_on(gclog_or_tty);
         _failures = true;
       }
@@ -2798,19 +2899,19 @@
 class G1ParVerifyTask: public AbstractGangTask {
 private:
   G1CollectedHeap* _g1h;
-  bool _allow_dirty;
-  bool _use_prev_marking;
-  bool _failures;
+  bool             _allow_dirty;
+  VerifyOption     _vo;
+  bool             _failures;
 
 public:
-  // use_prev_marking == true  -> use "prev" marking information,
-  // use_prev_marking == false -> use "next" marking information
-  G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty,
-                  bool use_prev_marking) :
+  // _vo == UsePrevMarking -> use "prev" marking information,
+  // _vo == UseNextMarking -> use "next" marking information,
+  // _vo == UseMarkWord    -> use mark word from object header.
+  G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty, VerifyOption vo) :
     AbstractGangTask("Parallel verify task"),
     _g1h(g1h),
     _allow_dirty(allow_dirty),
-    _use_prev_marking(use_prev_marking),
+    _vo(vo),
     _failures(false) { }
 
   bool failures() {
@@ -2819,7 +2920,7 @@
 
   void work(int worker_i) {
     HandleMark hm;
-    VerifyRegionClosure blk(_allow_dirty, true, _use_prev_marking);
+    VerifyRegionClosure blk(_allow_dirty, true, _vo);
     _g1h->heap_region_par_iterate_chunked(&blk, worker_i,
                                           HeapRegion::ParVerifyClaimValue);
     if (blk.failures()) {
@@ -2829,19 +2930,21 @@
 };
 
 void G1CollectedHeap::verify(bool allow_dirty, bool silent) {
-  verify(allow_dirty, silent, /* use_prev_marking */ true);
+  verify(allow_dirty, silent, VerifyOption_G1UsePrevMarking);
 }
 
 void G1CollectedHeap::verify(bool allow_dirty,
                              bool silent,
-                             bool use_prev_marking) {
+                             VerifyOption vo) {
   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
     if (!silent) { gclog_or_tty->print("Roots (excluding permgen) "); }
-    VerifyRootsClosure rootsCl(use_prev_marking);
+    VerifyRootsClosure rootsCl(vo);
     CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
+
     // We apply the relevant closures to all the oops in the
     // system dictionary, the string table and the code cache.
     const int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
+
     process_strong_roots(true,      // activate StrongRootsScope
                          true,      // we set "collecting perm gen" to true,
                                     // so we don't reset the dirty cards in the perm gen.
@@ -2849,21 +2952,37 @@
                          &rootsCl,
                          &blobsCl,
                          &rootsCl);
-    // Since we used "collecting_perm_gen" == true above, we will not have
-    // checked the refs from perm into the G1-collected heap. We check those
-    // references explicitly below. Whether the relevant cards are dirty
-    // is checked further below in the rem set verification.
-    if (!silent) { gclog_or_tty->print("Permgen roots "); }
-    perm_gen()->oop_iterate(&rootsCl);
+
+    // If we're verifying after the marking phase of a Full GC then we can't
+    // treat the perm gen as roots into the G1 heap. Some of the objects in
+    // the perm gen may be dead and hence not marked. If one of these dead
+    // objects is considered to be a root then we may end up with a false
+    // "Root location <x> points to dead ob <y>" failure.
+    if (vo != VerifyOption_G1UseMarkWord) {
+      // Since we used "collecting_perm_gen" == true above, we will not have
+      // checked the refs from perm into the G1-collected heap. We check those
+      // references explicitly below. Whether the relevant cards are dirty
+      // is checked further below in the rem set verification.
+      if (!silent) { gclog_or_tty->print("Permgen roots "); }
+      perm_gen()->oop_iterate(&rootsCl);
+    }
     bool failures = rootsCl.failures();
-    if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
-    verify_region_sets();
+
+    if (vo != VerifyOption_G1UseMarkWord) {
+      // If we're verifying during a full GC then the region sets
+      // will have been torn down at the start of the GC. Therefore
+      // verifying the region sets will fail. So we only verify
+      // the region sets when not in a full GC.
+      if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
+      verify_region_sets();
+    }
+
     if (!silent) { gclog_or_tty->print("HeapRegions "); }
     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
              "sanity check");
 
-      G1ParVerifyTask task(this, allow_dirty, use_prev_marking);
+      G1ParVerifyTask task(this, allow_dirty, vo);
       int n_workers = workers()->total_workers();
       set_par_threads(n_workers);
       workers()->run_task(&task);
@@ -2880,8 +2999,8 @@
       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
              "sanity check");
     } else {
-      VerifyRegionClosure blk(allow_dirty, false, use_prev_marking);
-      _hrs->iterate(&blk);
+      VerifyRegionClosure blk(allow_dirty, false, vo);
+      heap_region_iterate(&blk);
       if (blk.failures()) {
         failures = true;
       }
@@ -2896,7 +3015,7 @@
 #ifndef PRODUCT
       if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
         concurrent_mark()->print_reachable("at-verification-failure",
-                                           use_prev_marking, false /* all */);
+                                           vo, false /* all */);
       }
 #endif
       gclog_or_tty->flush();
@@ -2950,7 +3069,7 @@
 
 void G1CollectedHeap::print_on_extended(outputStream* st) const {
   PrintRegionClosure blk(st);
-  _hrs->iterate(&blk);
+  heap_region_iterate(&blk);
 }
 
 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
@@ -2989,14 +3108,55 @@
   SpecializationStats::print();
 }
 
-int G1CollectedHeap::addr_to_arena_id(void* addr) const {
-  HeapRegion* hr = heap_region_containing(addr);
-  if (hr == NULL) {
-    return 0;
-  } else {
-    return 1;
-  }
-}
+#ifndef PRODUCT
+// Helpful for debugging RSet issues.
+
+class PrintRSetsClosure : public HeapRegionClosure {
+private:
+  const char* _msg;
+  size_t _occupied_sum;
+
+public:
+  bool doHeapRegion(HeapRegion* r) {
+    HeapRegionRemSet* hrrs = r->rem_set();
+    size_t occupied = hrrs->occupied();
+    _occupied_sum += occupied;
+
+    gclog_or_tty->print_cr("Printing RSet for region "HR_FORMAT,
+                           HR_FORMAT_PARAMS(r));
+    if (occupied == 0) {
+      gclog_or_tty->print_cr("  RSet is empty");
+    } else {
+      hrrs->print();
+    }
+    gclog_or_tty->print_cr("----------");
+    return false;
+  }
+
+  PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
+    gclog_or_tty->cr();
+    gclog_or_tty->print_cr("========================================");
+    gclog_or_tty->print_cr(msg);
+    gclog_or_tty->cr();
+  }
+
+  ~PrintRSetsClosure() {
+    gclog_or_tty->print_cr("Occupied Sum: "SIZE_FORMAT, _occupied_sum);
+    gclog_or_tty->print_cr("========================================");
+    gclog_or_tty->cr();
+  }
+};
+
+void G1CollectedHeap::print_cset_rsets() {
+  PrintRSetsClosure cl("Printing CSet RSets");
+  collection_set_iterate(&cl);
+}
+
+void G1CollectedHeap::print_all_rsets() {
+  PrintRSetsClosure cl("Printing All RSets");;
+  heap_region_iterate(&cl);
+}
+#endif // PRODUCT
 
 G1CollectedHeap* G1CollectedHeap::heap() {
   assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
@@ -3020,6 +3180,10 @@
   COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
                         "derived pointer present"));
   // always_do_update_barrier = true;
+
+  // We have just completed a GC. Update the soft reference
+  // policy with the new heap occupancy
+  Universe::update_heap_info_at_gc();
 }
 
 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
@@ -3053,30 +3217,6 @@
   }
 }
 
-class VerifyMarkedObjsClosure: public ObjectClosure {
-    G1CollectedHeap* _g1h;
-    public:
-    VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
-    void do_object(oop obj) {
-      assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true,
-             "markandsweep mark should agree with concurrent deadness");
-    }
-};
-
-void
-G1CollectedHeap::checkConcurrentMark() {
-    VerifyMarkedObjsClosure verifycl(this);
-    //    MutexLockerEx x(getMarkBitMapLock(),
-    //              Mutex::_no_safepoint_check_flag);
-    object_iterate(&verifycl, false);
-}
-
-void G1CollectedHeap::do_sync_mark() {
-  _cm->checkpointRootsInitial();
-  _cm->markFromRoots();
-  _cm->checkpointRootsFinal(false);
-}
-
 // <NEW PREDICTION>
 
 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr,
@@ -3149,12 +3289,27 @@
 
 // </NEW PREDICTION>
 
-struct PrepareForRSScanningClosure : public HeapRegionClosure {
-  bool doHeapRegion(HeapRegion *r) {
-    r->rem_set()->set_iter_claimed(0);
+#ifdef ASSERT
+class VerifyCSetClosure: public HeapRegionClosure {
+public:
+  bool doHeapRegion(HeapRegion* hr) {
+    // Here we check that the CSet region's RSet is ready for parallel
+    // iteration. The fields that we'll verify are only manipulated
+    // when the region is part of a CSet and is collected. Afterwards,
+    // we reset these fields when we clear the region's RSet (when the
+    // region is freed) so they are ready when the region is
+    // re-allocated. The only exception to this is if there's an
+    // evacuation failure and instead of freeing the region we leave
+    // it in the heap. In that case, we reset these fields during
+    // evacuation failure handling.
+    guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
+
+    // Here's a good place to add any other checks we'd like to
+    // perform on CSet regions.
     return false;
   }
 };
+#endif // ASSERT
 
 #if TASKQUEUE_STATS
 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
@@ -3210,13 +3365,20 @@
     // for the duration of this pause.
     g1_policy()->decide_on_conc_mark_initiation();
 
+    // We do not allow initial-mark to be piggy-backed on a
+    // partially-young GC.
+    assert(!g1_policy()->during_initial_mark_pause() ||
+            g1_policy()->full_young_gcs(), "sanity");
+
+    // We also do not allow partially-young GCs during marking.
+    assert(!mark_in_progress() || g1_policy()->full_young_gcs(), "sanity");
+
     char verbose_str[128];
     sprintf(verbose_str, "GC pause ");
-    if (g1_policy()->in_young_gc_mode()) {
-      if (g1_policy()->full_young_gcs())
-        strcat(verbose_str, "(young)");
-      else
-        strcat(verbose_str, "(partial)");
+    if (g1_policy()->full_young_gcs()) {
+      strcat(verbose_str, "(young)");
+    } else {
+      strcat(verbose_str, "(partial)");
     }
     if (g1_policy()->during_initial_mark_pause()) {
       strcat(verbose_str, " (initial-mark)");
@@ -3245,136 +3407,155 @@
       append_secondary_free_list_if_not_empty_with_lock();
     }
 
-    increment_gc_time_stamp();
-
-    if (g1_policy()->in_young_gc_mode()) {
-      assert(check_young_list_well_formed(),
-             "young list should be well formed");
-    }
+    assert(check_young_list_well_formed(),
+      "young list should be well formed");
 
     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
       IsGCActiveMark x;
 
       gc_prologue(false);
       increment_total_collections(false /* full gc */);
-
-#if G1_REM_SET_LOGGING
-      gclog_or_tty->print_cr("\nJust chose CS, heap:");
-      print();
-#endif
+      increment_gc_time_stamp();
 
       if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
         HandleMark hm;  // Discard invalid handles created during verification
         gclog_or_tty->print(" VerifyBeforeGC:");
         prepare_for_verify();
-        Universe::verify(false);
+        Universe::verify(/* allow dirty */ false,
+                         /* silent      */ false,
+                         /* option      */ VerifyOption_G1UsePrevMarking);
+
       }
 
       COMPILER2_PRESENT(DerivedPointerTable::clear());
 
-      // Please see comment in G1CollectedHeap::ref_processing_init()
-      // to see how reference processing currently works in G1.
-      //
-      // We want to turn off ref discovery, if necessary, and turn it back on
-      // on again later if we do. XXX Dubious: why is discovery disabled?
-      bool was_enabled = ref_processor()->discovery_enabled();
-      if (was_enabled) ref_processor()->disable_discovery();
-
-      // Forget the current alloc region (we might even choose it to be part
-      // of the collection set!).
-      release_mutator_alloc_region();
-
-      // The elapsed time induced by the start time below deliberately elides
-      // the possible verification above.
-      double start_time_sec = os::elapsedTime();
-      size_t start_used_bytes = used();
+      // Please see comment in g1CollectedHeap.hpp and
+      // G1CollectedHeap::ref_processing_init() to see how
+      // reference processing currently works in G1.
+
+      // Enable discovery in the STW reference processor
+      ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
+                                            true /*verify_no_refs*/);
+
+      {
+        // We want to temporarily turn off discovery by the
+        // CM ref processor, if necessary, and turn it back on
+        // on again later if we do. Using a scoped
+        // NoRefDiscovery object will do this.
+        NoRefDiscovery no_cm_discovery(ref_processor_cm());
+
+        // Forget the current alloc region (we might even choose it to be part
+        // of the collection set!).
+        release_mutator_alloc_region();
+
+        // We should call this after we retire the mutator alloc
+        // region(s) so that all the ALLOC / RETIRE events are generated
+        // before the start GC event.
+        _hr_printer.start_gc(false /* full */, (size_t) total_collections());
+
+        // The elapsed time induced by the start time below deliberately elides
+        // the possible verification above.
+        double start_time_sec = os::elapsedTime();
+        size_t start_used_bytes = used();
 
 #if YOUNG_LIST_VERBOSE
-      gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
-      _young_list->print();
-      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
+        gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
+        _young_list->print();
+        g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE
 
-      g1_policy()->record_collection_pause_start(start_time_sec,
-                                                 start_used_bytes);
+        g1_policy()->record_collection_pause_start(start_time_sec,
+                                                   start_used_bytes);
 
 #if YOUNG_LIST_VERBOSE
-      gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
-      _young_list->print();
+        gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
+        _young_list->print();
 #endif // YOUNG_LIST_VERBOSE
 
-      if (g1_policy()->during_initial_mark_pause()) {
-        concurrent_mark()->checkpointRootsInitialPre();
-      }
-      save_marks();
-
-      // We must do this before any possible evacuation that should propagate
-      // marks.
-      if (mark_in_progress()) {
-        double start_time_sec = os::elapsedTime();
-
-        _cm->drainAllSATBBuffers();
-        double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0;
-        g1_policy()->record_satb_drain_time(finish_mark_ms);
-      }
-      // Record the number of elements currently on the mark stack, so we
-      // only iterate over these.  (Since evacuation may add to the mark
-      // stack, doing more exposes race conditions.)  If no mark is in
-      // progress, this will be zero.
-      _cm->set_oops_do_bound();
-
-      if (mark_in_progress()) {
-        concurrent_mark()->newCSet();
-      }
+        if (g1_policy()->during_initial_mark_pause()) {
+          concurrent_mark()->checkpointRootsInitialPre();
+        }
+        perm_gen()->save_marks();
+
+        // We must do this before any possible evacuation that should propagate
+        // marks.
+        if (mark_in_progress()) {
+          double start_time_sec = os::elapsedTime();
+
+          _cm->drainAllSATBBuffers();
+          double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0;
+          g1_policy()->record_satb_drain_time(finish_mark_ms);
+        }
+        // Record the number of elements currently on the mark stack, so we
+        // only iterate over these.  (Since evacuation may add to the mark
+        // stack, doing more exposes race conditions.)  If no mark is in
+        // progress, this will be zero.
+        _cm->set_oops_do_bound();
+
+        if (mark_in_progress()) {
+          concurrent_mark()->newCSet();
+        }
 
 #if YOUNG_LIST_VERBOSE
-      gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
-      _young_list->print();
-      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
+        gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
+        _young_list->print();
+        g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE
 
-      g1_policy()->choose_collection_set(target_pause_time_ms);
-
-      // We have chosen the complete collection set. If marking is
-      // active then, we clear the region fields of any of the
-      // concurrent marking tasks whose region fields point into
-      // the collection set as these values will become stale. This
-      // will cause the owning marking threads to claim a new region
-      // when marking restarts.
-      if (mark_in_progress()) {
-        concurrent_mark()->reset_active_task_region_fields_in_cset();
-      }
-
-      // Nothing to do if we were unable to choose a collection set.
-#if G1_REM_SET_LOGGING
-      gclog_or_tty->print_cr("\nAfter pause, heap:");
-      print();
-#endif
-      PrepareForRSScanningClosure prepare_for_rs_scan;
-      collection_set_iterate(&prepare_for_rs_scan);
-
-      setup_surviving_young_words();
-
-      // Set up the gc allocation regions.
-      get_gc_alloc_regions();
-
-      // Actually do the work...
-      evacuate_collection_set();
-
-      free_collection_set(g1_policy()->collection_set());
-      g1_policy()->clear_collection_set();
-
-      cleanup_surviving_young_words();
-
-      // Start a new incremental collection set for the next pause.
-      g1_policy()->start_incremental_cset_building();
-
-      // Clear the _cset_fast_test bitmap in anticipation of adding
-      // regions to the incremental collection set for the next
-      // evacuation pause.
-      clear_cset_fast_test();
-
-      if (g1_policy()->in_young_gc_mode()) {
+        g1_policy()->choose_collection_set(target_pause_time_ms);
+
+        if (_hr_printer.is_active()) {
+          HeapRegion* hr = g1_policy()->collection_set();
+          while (hr != NULL) {
+            G1HRPrinter::RegionType type;
+            if (!hr->is_young()) {
+              type = G1HRPrinter::Old;
+            } else if (hr->is_survivor()) {
+              type = G1HRPrinter::Survivor;
+            } else {
+              type = G1HRPrinter::Eden;
+            }
+            _hr_printer.cset(hr);
+            hr = hr->next_in_collection_set();
+          }
+        }
+
+        // We have chosen the complete collection set. If marking is
+        // active then, we clear the region fields of any of the
+        // concurrent marking tasks whose region fields point into
+        // the collection set as these values will become stale. This
+        // will cause the owning marking threads to claim a new region
+        // when marking restarts.
+        if (mark_in_progress()) {
+          concurrent_mark()->reset_active_task_region_fields_in_cset();
+        }
+
+#ifdef ASSERT
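+        // Sanity check that every CSet region's RSet is ready for
+        // parallel iteration before we start evacuating.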
+        VerifyCSetClosure cl;
+        collection_set_iterate(&cl);
+#endif // ASSERT
+
+        setup_surviving_young_words();
+
+        // Initialize the GC alloc regions.
+        init_gc_alloc_regions();
+
+        // Actually do the work...
+        evacuate_collection_set();
+
+        free_collection_set(g1_policy()->collection_set());
+        g1_policy()->clear_collection_set();
+
+        cleanup_surviving_young_words();
+
+        // Start a new incremental collection set for the next pause.
+        g1_policy()->start_incremental_cset_building();
+
+        // Clear the _cset_fast_test bitmap in anticipation of adding
+        // regions to the incremental collection set for the next
+        // evacuation pause.
+        clear_cset_fast_test();
+
         _young_list->reset_sampled_info();
 
         // Don't check the whole heap at this point as the
@@ -3382,7 +3563,7 @@
         // as survivors and moved on to the survivor list.
         // Survivor regions will fail the !is_young() check.
         assert(check_young_list_empty(false /* check_heap */),
-               "young list should be empty");
+          "young list should be empty");
 
 #if YOUNG_LIST_VERBOSE
         gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
@@ -3390,64 +3571,106 @@
 #endif // YOUNG_LIST_VERBOSE
 
         g1_policy()->record_survivor_regions(_young_list->survivor_length(),
-                                          _young_list->first_survivor_region(),
-                                          _young_list->last_survivor_region());
+                                            _young_list->first_survivor_region(),
+                                            _young_list->last_survivor_region());
 
         _young_list->reset_auxilary_lists();
-      }
-
-      if (evacuation_failed()) {
-        _summary_bytes_used = recalculate_used();
-      } else {
-        // The "used" of the the collection set have already been subtracted
-        // when they were freed.  Add in the bytes evacuated.
-        _summary_bytes_used += g1_policy()->bytes_in_to_space();
-      }
-
-      if (g1_policy()->in_young_gc_mode() &&
-          g1_policy()->during_initial_mark_pause()) {
-        concurrent_mark()->checkpointRootsInitialPost();
-        set_marking_started();
-        // CAUTION: after the doConcurrentMark() call below,
-        // the concurrent marking thread(s) could be running
-        // concurrently with us. Make sure that anything after
-        // this point does not assume that we are the only GC thread
-        // running. Note: of course, the actual marking work will
-        // not start until the safepoint itself is released in
-        // ConcurrentGCThread::safepoint_desynchronize().
-        doConcurrentMark();
-      }
-
-      allocate_dummy_regions();
+
+        if (evacuation_failed()) {
+          _summary_bytes_used = recalculate_used();
+        } else {
+          // The "used" of the the collection set have already been subtracted
+          // when they were freed.  Add in the bytes evacuated.
+          _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
+        }
+
+        if (g1_policy()->during_initial_mark_pause()) {
+          concurrent_mark()->checkpointRootsInitialPost();
+          set_marking_started();
+          // CAUTION: after the doConcurrentMark() call below,
+          // the concurrent marking thread(s) could be running
+          // concurrently with us. Make sure that anything after
+          // this point does not assume that we are the only GC thread
+          // running. Note: of course, the actual marking work will
+          // not start until the safepoint itself is released in
+          // ConcurrentGCThread::safepoint_desynchronize().
+          doConcurrentMark();
+        }
+
+        allocate_dummy_regions();
 
 #if YOUNG_LIST_VERBOSE
-      gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
-      _young_list->print();
-      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
+        gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
+        _young_list->print();
+        g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE
 
-      init_mutator_alloc_region();
-
-      double end_time_sec = os::elapsedTime();
-      double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
-      g1_policy()->record_pause_time_ms(pause_time_ms);
-      g1_policy()->record_collection_pause_end();
-
-      MemoryService::track_memory_usage();
-
-      if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
-        HandleMark hm;  // Discard invalid handles created during verification
-        gclog_or_tty->print(" VerifyAfterGC:");
-        prepare_for_verify();
-        Universe::verify(false);
+        init_mutator_alloc_region();
+
+        {
+          size_t expand_bytes = g1_policy()->expansion_amount();
+          if (expand_bytes > 0) {
+            size_t bytes_before = capacity();
+            if (!expand(expand_bytes)) {
+              // We failed to expand the heap so let's verify that
+              // committed/uncommitted amount match the backing store
+              assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
+              assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
+            }
+          }
+        }
+
+        double end_time_sec = os::elapsedTime();
+        double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
+        g1_policy()->record_pause_time_ms(pause_time_ms);
+        g1_policy()->record_collection_pause_end();
+
+        MemoryService::track_memory_usage();
+
+        // In prepare_for_verify() below we'll need to scan the deferred
+        // update buffers to bring the RSets up-to-date if
+        // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
+        // the update buffers we'll probably need to scan cards on the
+        // regions we just allocated to (i.e., the GC alloc
+        // regions). However, during the last GC we called
+        // set_saved_mark() on all the GC alloc regions, so card
+        // scanning might skip the [saved_mark_word()...top()] area of
+        // those regions (i.e., the area we allocated objects into
+        // during the last GC). But it shouldn't. Given that
+        // saved_mark_word() is conditional on whether the GC time stamp
+        // on the region is current or not, by incrementing the GC time
+        // stamp here we invalidate all the GC time stamps on all the
+        // regions and saved_mark_word() will simply return top() for
+        // all the regions. This is a nicer way of ensuring this rather
+        // than iterating over the regions and fixing them. In fact, the
+        // GC time stamp increment here also ensures that
+        // saved_mark_word() will return top() between pauses, i.e.,
+        // during concurrent refinement. So we don't need the
+        // is_gc_active() check to decide which top to use when
+        // scanning cards (see CR 7039627).
+        increment_gc_time_stamp();
+
+        if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
+          HandleMark hm;  // Discard invalid handles created during verification
+          gclog_or_tty->print(" VerifyAfterGC:");
+          prepare_for_verify();
+          Universe::verify(/* allow dirty */ true,
+                           /* silent      */ false,
+                           /* option      */ VerifyOption_G1UsePrevMarking);
+        }
+
+        assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
+        ref_processor_stw()->verify_no_references_recorded();
+
+        // CM reference discovery will be re-enabled if necessary.
       }
 
-      if (was_enabled) ref_processor()->enable_discovery();
-
       {
         size_t expand_bytes = g1_policy()->expansion_amount();
         if (expand_bytes > 0) {
           size_t bytes_before = capacity();
+          // No need for an ergo verbose message here,
+          // expansion_amount() does this when it returns a value > 0.
           if (!expand(expand_bytes)) {
             // We failed to expand the heap so let's verify that
             // committed/uncommitted amount match the backing store
@@ -3457,6 +3680,15 @@
         }
       }
 
+      // We should do this after we potentially expand the heap so
+      // that all the COMMIT events are generated before the end GC
+      // event, and after we retire the GC alloc regions so that all
+      // RETIRE events are generated before the end GC event.
+      _hr_printer.end_gc(false /* full */, (size_t) total_collections());
+
+      // We have to do this after we decide whether to expand the heap or not.
+      g1_policy()->print_heap_transition();
+
       if (mark_in_progress()) {
         concurrent_mark()->update_g1_committed();
       }
@@ -3475,6 +3707,7 @@
     }
   }
 
+  _hrs.verify_optional();
   verify_region_sets_optional();
 
   TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
@@ -3483,7 +3716,7 @@
   if (PrintHeapAtGC) {
     Universe::print_heap_after_gc();
   }
-  g1mm()->update_counters();
+  g1mm()->update_sizes();
 
   if (G1SummarizeRSetStats &&
       (G1SummarizeRSetStatsPeriod > 0) &&
@@ -3522,254 +3755,49 @@
   assert(_mutator_alloc_region.get() == NULL, "post-condition");
 }
 
-void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
-  assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
-  // make sure we don't call set_gc_alloc_region() multiple times on
-  // the same region
-  assert(r == NULL || !r->is_gc_alloc_region(),
-         "shouldn't already be a GC alloc region");
-  assert(r == NULL || !r->isHumongous(),
-         "humongous regions shouldn't be used as GC alloc regions");
-
-  HeapWord* original_top = NULL;
-  if (r != NULL)
-    original_top = r->top();
-
-  // We will want to record the used space in r as being there before gc.
-  // One we install it as a GC alloc region it's eligible for allocation.
-  // So record it now and use it later.
-  size_t r_used = 0;
-  if (r != NULL) {
-    r_used = r->used();
-
-    if (G1CollectedHeap::use_parallel_gc_threads()) {
-      // need to take the lock to guard against two threads calling
-      // get_gc_alloc_region concurrently (very unlikely but...)
-      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
-      r->save_marks();
-    }
-  }
-  HeapRegion* old_alloc_region = _gc_alloc_regions[purpose];
-  _gc_alloc_regions[purpose] = r;
-  if (old_alloc_region != NULL) {
-    // Replace aliases too.
-    for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
-      if (_gc_alloc_regions[ap] == old_alloc_region) {
-        _gc_alloc_regions[ap] = r;
-      }
-    }
-  }
-  if (r != NULL) {
-    push_gc_alloc_region(r);
-    if (mark_in_progress() && original_top != r->next_top_at_mark_start()) {
-      // We are using a region as a GC alloc region after it has been used
-      // as a mutator allocation region during the current marking cycle.
-      // The mutator-allocated objects are currently implicitly marked, but
-      // when we move hr->next_top_at_mark_start() forward at the the end
-      // of the GC pause, they won't be.  We therefore mark all objects in
-      // the "gap".  We do this object-by-object, since marking densely
-      // does not currently work right with marking bitmap iteration.  This
-      // means we rely on TLAB filling at the start of pauses, and no
-      // "resuscitation" of filled TLAB's.  If we want to do this, we need
-      // to fix the marking bitmap iteration.
-      HeapWord* curhw = r->next_top_at_mark_start();
-      HeapWord* t = original_top;
-
-      while (curhw < t) {
-        oop cur = (oop)curhw;
-        // We'll assume parallel for generality.  This is rare code.
-        concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them?
-        curhw = curhw + cur->size();
-      }
-      assert(curhw == t, "Should have parsed correctly.");
-    }
-    if (G1PolicyVerbose > 1) {
-      gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") "
-                          "for survivors:", r->bottom(), original_top, r->end());
-      r->print();
-    }
-    g1_policy()->record_before_bytes(r_used);
-  }
-}
-
-void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) {
-  assert(Thread::current()->is_VM_thread() ||
-         FreeList_lock->owned_by_self(), "Precondition");
-  assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(),
-         "Precondition.");
-  hr->set_is_gc_alloc_region(true);
-  hr->set_next_gc_alloc_region(_gc_alloc_region_list);
-  _gc_alloc_region_list = hr;
-}
-
-#ifdef G1_DEBUG
-class FindGCAllocRegion: public HeapRegionClosure {
-public:
-  bool doHeapRegion(HeapRegion* r) {
-    if (r->is_gc_alloc_region()) {
-      gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.",
-                             r->hrs_index(), r->bottom());
-    }
-    return false;
-  }
-};
-#endif // G1_DEBUG
-
-void G1CollectedHeap::forget_alloc_region_list() {
+void G1CollectedHeap::init_gc_alloc_regions() {
   assert_at_safepoint(true /* should_be_vm_thread */);
-  while (_gc_alloc_region_list != NULL) {
-    HeapRegion* r = _gc_alloc_region_list;
-    assert(r->is_gc_alloc_region(), "Invariant.");
-    // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on
-    // newly allocated data in order to be able to apply deferred updates
-    // before the GC is done for verification purposes (i.e to allow
-    // G1HRRSFlushLogBuffersOnVerify). It's safe thing to do after the
-    // collection.
-    r->ContiguousSpace::set_saved_mark();
-    _gc_alloc_region_list = r->next_gc_alloc_region();
-    r->set_next_gc_alloc_region(NULL);
-    r->set_is_gc_alloc_region(false);
-    if (r->is_survivor()) {
-      if (r->is_empty()) {
-        r->set_not_young();
-      } else {
-        _young_list->add_survivor_region(r);
-      }
-    }
-  }
-#ifdef G1_DEBUG
-  FindGCAllocRegion fa;
-  heap_region_iterate(&fa);
-#endif // G1_DEBUG
-}
-
-
-bool G1CollectedHeap::check_gc_alloc_regions() {
-  // TODO: allocation regions check
-  return true;
-}
-
-void G1CollectedHeap::get_gc_alloc_regions() {
-  // First, let's check that the GC alloc region list is empty (it should)
-  assert(_gc_alloc_region_list == NULL, "invariant");
-
-  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
-    assert(_gc_alloc_regions[ap] == NULL, "invariant");
-    assert(_gc_alloc_region_counts[ap] == 0, "invariant");
-
-    // Create new GC alloc regions.
-    HeapRegion* alloc_region = _retained_gc_alloc_regions[ap];
-    _retained_gc_alloc_regions[ap] = NULL;
-
-    if (alloc_region != NULL) {
-      assert(_retain_gc_alloc_region[ap], "only way to retain a GC region");
-
-      // let's make sure that the GC alloc region is not tagged as such
-      // outside a GC operation
-      assert(!alloc_region->is_gc_alloc_region(), "sanity");
-
-      if (alloc_region->in_collection_set() ||
-          alloc_region->top() == alloc_region->end() ||
-          alloc_region->top() == alloc_region->bottom() ||
-          alloc_region->isHumongous()) {
-        // we will discard the current GC alloc region if
-        // * it's in the collection set (it can happen!),
-        // * it's already full (no point in using it),
-        // * it's empty (this means that it was emptied during
-        // a cleanup and it should be on the free list now), or
-        // * it's humongous (this means that it was emptied
-        // during a cleanup and was added to the free list, but
-        // has been subseqently used to allocate a humongous
-        // object that may be less than the region size).
-
-        alloc_region = NULL;
-      }
-    }
-
-    if (alloc_region == NULL) {
-      // we will get a new GC alloc region
-      alloc_region = new_gc_alloc_region(ap, HeapRegion::GrainWords);
-    } else {
-      // the region was retained from the last collection
-      ++_gc_alloc_region_counts[ap];
-      if (G1PrintHeapRegions) {
-        gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
-                               "top "PTR_FORMAT,
-                               alloc_region->hrs_index(), alloc_region->bottom(), alloc_region->end(), alloc_region->top());
-      }
-    }
-
-    if (alloc_region != NULL) {
-      assert(_gc_alloc_regions[ap] == NULL, "pre-condition");
-      set_gc_alloc_region(ap, alloc_region);
-    }
-
-    assert(_gc_alloc_regions[ap] == NULL ||
-           _gc_alloc_regions[ap]->is_gc_alloc_region(),
-           "the GC alloc region should be tagged as such");
-    assert(_gc_alloc_regions[ap] == NULL ||
-           _gc_alloc_regions[ap] == _gc_alloc_region_list,
-           "the GC alloc region should be the same as the GC alloc list head");
-  }
-  // Set alternative regions for allocation purposes that have reached
-  // their limit.
-  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
-    GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap);
-    if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) {
-      _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose];
-    }
-  }
-  assert(check_gc_alloc_regions(), "alloc regions messed up");
-}
-
-void G1CollectedHeap::release_gc_alloc_regions(bool totally) {
-  // We keep a separate list of all regions that have been alloc regions in
-  // the current collection pause. Forget that now. This method will
-  // untag the GC alloc regions and tear down the GC alloc region
-  // list. It's desirable that no regions are tagged as GC alloc
-  // outside GCs.
-
-  forget_alloc_region_list();
-
-  // The current alloc regions contain objs that have survived
-  // collection. Make them no longer GC alloc regions.
-  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
-    HeapRegion* r = _gc_alloc_regions[ap];
-    _retained_gc_alloc_regions[ap] = NULL;
-    _gc_alloc_region_counts[ap] = 0;
-
-    if (r != NULL) {
-      // we retain nothing on _gc_alloc_regions between GCs
-      set_gc_alloc_region(ap, NULL);
-
-      if (r->is_empty()) {
-        // We didn't actually allocate anything in it; let's just put
-        // it back on the free list.
-        _free_list.add_as_head(r);
-      } else if (_retain_gc_alloc_region[ap] && !totally) {
-        // retain it so that we can use it at the beginning of the next GC
-        _retained_gc_alloc_regions[ap] = r;
-      }
-    }
-  }
-}
-
-#ifndef PRODUCT
-// Useful for debugging
-
-void G1CollectedHeap::print_gc_alloc_regions() {
-  gclog_or_tty->print_cr("GC alloc regions");
-  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
-    HeapRegion* r = _gc_alloc_regions[ap];
-    if (r == NULL) {
-      gclog_or_tty->print_cr("  %2d : "PTR_FORMAT, ap, NULL);
-    } else {
-      gclog_or_tty->print_cr("  %2d : "PTR_FORMAT" "SIZE_FORMAT,
-                             ap, r->bottom(), r->used());
-    }
-  }
-}
-#endif // PRODUCT
+
+  _survivor_gc_alloc_region.init();
+  _old_gc_alloc_region.init();
+  HeapRegion* retained_region = _retained_old_gc_alloc_region;
+  _retained_old_gc_alloc_region = NULL;
+
+  // We will discard the current GC alloc region if:
+  // a) it's in the collection set (it can happen!),
+  // b) it's already full (no point in using it),
+  // c) it's empty (this means that it was emptied during
+  // a cleanup and it should be on the free list now), or
+  // d) it's humongous (this means that it was emptied
+  // during a cleanup and was added to the free list, but
+  // has been subsequently used to allocate a humongous
+  // object that may be less than the region size).
+  if (retained_region != NULL &&
+      !retained_region->in_collection_set() &&
+      !(retained_region->top() == retained_region->end()) &&
+      !retained_region->is_empty() &&
+      !retained_region->isHumongous()) {
+    retained_region->set_saved_mark();
+    _old_gc_alloc_region.set(retained_region);
+    _hr_printer.reuse(retained_region);
+  }
+}
+
+void G1CollectedHeap::release_gc_alloc_regions() {
+  _survivor_gc_alloc_region.release();
+  // If we have an old GC alloc region to release, we'll save it in
+  // _retained_old_gc_alloc_region. If we don't,
+  // _retained_old_gc_alloc_region will become NULL. This is what we
+  // want either way so no reason to check explicitly for either
+  // condition.
+  _retained_old_gc_alloc_region = _old_gc_alloc_region.release();
+}
+
+void G1CollectedHeap::abandon_gc_alloc_regions() {
+  assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
+  assert(_old_gc_alloc_region.get() == NULL, "pre-condition");
+  _retained_old_gc_alloc_region = NULL;
+}
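+
+// Taken together, the three routines above cover the per-pause lifecycle
+// of the GC alloc regions. A rough sketch of the intended call order
+// (driver call sites assumed; not all of them appear in this file):
+//
+//   init_gc_alloc_regions();      // at the start of the evacuation pause;
+//                                 // reuses _retained_old_gc_alloc_region if
+//                                 // it is still usable (see the checks above)
+//   ... evacuate the collection set, allocating survivor/old copies ...
+//   release_gc_alloc_regions();   // survivor region released, old region
+//                                 // saved in _retained_old_gc_alloc_region
+//
+// abandon_gc_alloc_regions() drops the retained region instead, for paths
+// (e.g. a full collection) where carrying it over makes no sense.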
 
 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
   _drain_in_progress = false;
@@ -3786,54 +3814,6 @@
   _evac_failure_scan_stack = NULL;
 }
 
-
-
-// *** Sequential G1 Evacuation
-
-class G1IsAliveClosure: public BoolObjectClosure {
-  G1CollectedHeap* _g1;
-public:
-  G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
-  void do_object(oop p) { assert(false, "Do not call."); }
-  bool do_object_b(oop p) {
-    // It is reachable if it is outside the collection set, or is inside
-    // and forwarded.
-
-#ifdef G1_DEBUG
-    gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d",
-                           (void*) p, _g1->obj_in_cs(p), p->is_forwarded(),
-                           !_g1->obj_in_cs(p) || p->is_forwarded());
-#endif // G1_DEBUG
-
-    return !_g1->obj_in_cs(p) || p->is_forwarded();
-  }
-};
-
-class G1KeepAliveClosure: public OopClosure {
-  G1CollectedHeap* _g1;
-public:
-  G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
-  void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
-  void do_oop(      oop* p) {
-    oop obj = *p;
-#ifdef G1_DEBUG
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT,
-                             p, (void*) obj, (void*) *p);
-    }
-#endif // G1_DEBUG
-
-    if (_g1->obj_in_cs(obj)) {
-      assert( obj->is_forwarded(), "invariant" );
-      *p = obj->forwardee();
-#ifdef G1_DEBUG
-      gclog_or_tty->print_cr("     in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT,
-                             (void*) obj, (void*) *p);
-#endif // G1_DEBUG
-    }
-  }
-};
-
 class UpdateRSetDeferred : public OopsInHeapRegionClosure {
 private:
   G1CollectedHeap* _g1;
@@ -3949,6 +3929,14 @@
       assert(cur->in_collection_set(), "bad CS");
       RemoveSelfPointerClosure rspc(_g1h, cur, cl);
 
+      // In the common case we make sure that this is done when the
+      // region is freed so that it is "ready-to-go" when it's
+      // re-allocated. However, when evacuation failure happens, a
+      // region will remain in the heap and might ultimately be added
+      // to a CSet in the future. So we have to be careful here and
+      // make sure the region's RSet is ready for parallel iteration
+      // whenever this might be required in the future.
+      cur->rem_set()->reset_for_par_iteration();
       cur->reset_bot();
       cl->set_region(cur);
       cur->object_iterate(&rspc);
@@ -4016,7 +4004,8 @@
 
 oop
 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
-                                               oop old) {
+                                               oop old,
+                                               bool should_mark_root) {
   assert(obj_in_cs(old),
          err_msg("obj: "PTR_FORMAT" should still be in the CSet",
                  (HeapWord*) old));
@@ -4024,6 +4013,16 @@
   oop forward_ptr = old->forward_to_atomic(old);
   if (forward_ptr == NULL) {
     // Forward-to-self succeeded.
+
+    // should_mark_root will be true when this routine is called
+    // from a root scanning closure during an initial mark pause.
+    // In this case the thread that succeeds in self-forwarding the
+    // object is also responsible for marking the object.
+    if (should_mark_root) {
+      assert(!oopDesc::is_null(old), "shouldn't be");
+      _cm->grayRoot(old);
+    }
+
     if (_evac_failure_closure != cl) {
       MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
       assert(!_drain_in_progress,
@@ -4061,11 +4060,7 @@
   HeapRegion* r = heap_region_containing(old);
   if (!r->evacuation_failed()) {
     r->set_evacuation_failed(true);
-    if (G1PrintHeapRegions) {
-      gclog_or_tty->print("overflow in heap region "PTR_FORMAT" "
-                          "["PTR_FORMAT","PTR_FORMAT")\n",
-                          r, r->bottom(), r->end());
-    }
+    _hr_printer.evac_failure(r);
   }
 
   push_on_evac_failure_scan_stack(old);
@@ -4095,135 +4090,32 @@
   }
 }
 
-// *** Parallel G1 Evacuation
-
 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
                                                   size_t word_size) {
-  assert(!isHumongous(word_size),
-         err_msg("we should not be seeing humongous allocation requests "
-                 "during GC, word_size = "SIZE_FORMAT, word_size));
-
-  HeapRegion* alloc_region = _gc_alloc_regions[purpose];
-  // let the caller handle alloc failure
-  if (alloc_region == NULL) return NULL;
-
-  HeapWord* block = alloc_region->par_allocate(word_size);
-  if (block == NULL) {
-    block = allocate_during_gc_slow(purpose, alloc_region, true, word_size);
-  }
-  return block;
-}
-
-void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region,
-                                            bool par) {
-  // Another thread might have obtained alloc_region for the given
-  // purpose, and might be attempting to allocate in it, and might
-  // succeed.  Therefore, we can't do the "finalization" stuff on the
-  // region below until we're sure the last allocation has happened.
-  // We ensure this by allocating the remaining space with a garbage
-  // object.
-  if (par) par_allocate_remaining_space(alloc_region);
-  // Now we can do the post-GC stuff on the region.
-  alloc_region->note_end_of_copying();
-  g1_policy()->record_after_bytes(alloc_region->used());
-}
-
-HeapWord*
-G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose,
-                                         HeapRegion*    alloc_region,
-                                         bool           par,
-                                         size_t         word_size) {
-  assert(!isHumongous(word_size),
-         err_msg("we should not be seeing humongous allocation requests "
-                 "during GC, word_size = "SIZE_FORMAT, word_size));
-
-  // We need to make sure we serialize calls to this method. Given
-  // that the FreeList_lock guards accesses to the free_list anyway,
-  // and we need to potentially remove a region from it, we'll use it
-  // to protect the whole call.
-  MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-
-  HeapWord* block = NULL;
-  // In the parallel case, a previous thread to obtain the lock may have
-  // already assigned a new gc_alloc_region.
-  if (alloc_region != _gc_alloc_regions[purpose]) {
-    assert(par, "But should only happen in parallel case.");
-    alloc_region = _gc_alloc_regions[purpose];
-    if (alloc_region == NULL) return NULL;
-    block = alloc_region->par_allocate(word_size);
-    if (block != NULL) return block;
-    // Otherwise, continue; this new region is empty, too.
-  }
-  assert(alloc_region != NULL, "We better have an allocation region");
-  retire_alloc_region(alloc_region, par);
-
-  if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) {
-    // Cannot allocate more regions for the given purpose.
-    GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose);
-    // Is there an alternative?
-    if (purpose != alt_purpose) {
-      HeapRegion* alt_region = _gc_alloc_regions[alt_purpose];
-      // Has not the alternative region been aliased?
-      if (alloc_region != alt_region && alt_region != NULL) {
-        // Try to allocate in the alternative region.
-        if (par) {
-          block = alt_region->par_allocate(word_size);
-        } else {
-          block = alt_region->allocate(word_size);
-        }
-        // Make an alias.
-        _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose];
-        if (block != NULL) {
-          return block;
-        }
-        retire_alloc_region(alt_region, par);
-      }
-      // Both the allocation region and the alternative one are full
-      // and aliased, replace them with a new allocation region.
-      purpose = alt_purpose;
+  if (purpose == GCAllocForSurvived) {
+    HeapWord* result = survivor_attempt_allocation(word_size);
+    if (result != NULL) {
+      return result;
     } else {
-      set_gc_alloc_region(purpose, NULL);
-      return NULL;
+      // Let's try to allocate in the old gen in case we can fit the
+      // object there.
+      return old_attempt_allocation(word_size);
     }
-  }
-
-  // Now allocate a new region for allocation.
-  alloc_region = new_gc_alloc_region(purpose, word_size);
-
-  // let the caller handle alloc failure
-  if (alloc_region != NULL) {
-
-    assert(check_gc_alloc_regions(), "alloc regions messed up");
-    assert(alloc_region->saved_mark_at_top(),
-           "Mark should have been saved already.");
-    // This must be done last: once it's installed, other regions may
-    // allocate in it (without holding the lock.)
-    set_gc_alloc_region(purpose, alloc_region);
-
-    if (par) {
-      block = alloc_region->par_allocate(word_size);
+  } else {
+    assert(purpose == GCAllocForTenured, "sanity");
+    HeapWord* result = old_attempt_allocation(word_size);
+    if (result != NULL) {
+      return result;
     } else {
-      block = alloc_region->allocate(word_size);
+      // Let's try to allocate in the survivors in case we can fit the
+      // object there.
+      return survivor_attempt_allocation(word_size);
     }
-    // Caller handles alloc failure.
-  } else {
-    // This sets other apis using the same old alloc region to NULL, also.
-    set_gc_alloc_region(purpose, NULL);
-  }
-  return block;  // May be NULL.
-}
-
-void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) {
-  HeapWord* block = NULL;
-  size_t free_words;
-  do {
-    free_words = r->free()/HeapWordSize;
-    // If there's too little space, no one can allocate, so we're done.
-    if (free_words < CollectedHeap::min_fill_size()) return;
-    // Otherwise, try to claim it.
-    block = r->par_allocate(free_words);
-  } while (block == NULL);
-  fill_with_object(block, free_words);
+  }
+
+  ShouldNotReachHere();
+  // Trying to keep some compilers happy.
+  return NULL;
 }
 
 #ifndef PRODUCT
@@ -4234,6 +4126,23 @@
 }
 #endif // PRODUCT
 
+G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
+  ParGCAllocBuffer(gclab_word_size),
+  _should_mark_objects(false),
+  _bitmap(G1CollectedHeap::heap()->reserved_region().start(), gclab_word_size),
+  _retired(false)
+{
+  // _should_mark_objects is set to true when G1ParCopyHelper needs to
+  // mark the forwarded location of an evacuated object.
+  // We set _should_mark_objects to true if marking is active, i.e. when we
+  // need to propagate a mark, or during an initial mark pause, i.e. when we
+  // need to mark objects immediately reachable by the roots.
+  if (G1CollectedHeap::heap()->mark_in_progress() ||
+      G1CollectedHeap::heap()->g1_policy()->during_initial_mark_pause()) {
+    _should_mark_objects = true;
+  }
+}
+
 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
   : _g1h(g1h),
     _refs(g1h->task_queue(queue_num)),
@@ -4335,12 +4244,17 @@
 #endif // ASSERT
 
 void G1ParScanThreadState::trim_queue() {
+  assert(_evac_cl != NULL, "not set");
+  assert(_evac_failure_cl != NULL, "not set");
+  assert(_partial_scan_cl != NULL, "not set");
+
   StarTask ref;
   do {
     // Drain the overflow stack first, so other threads can steal.
     while (refs()->pop_overflow(ref)) {
       deal_with_reference(ref);
     }
+
     while (refs()->pop_local(ref)) {
       deal_with_reference(ref);
     }
@@ -4349,12 +4263,14 @@
 
 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
   _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
-  _par_scan_state(par_scan_state) { }
-
-template <class T> void G1ParCopyHelper::mark_forwardee(T* p) {
-  // This is called _after_ do_oop_work has been called, hence after
-  // the object has been relocated to its new location and *p points
-  // to its new location.
+  _par_scan_state(par_scan_state),
+  _during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()),
+  _mark_in_progress(_g1->mark_in_progress()) { }
+
+template <class T> void G1ParCopyHelper::mark_object(T* p) {
+  // This is called from do_oop_work for objects that are not
+  // in the collection set. Objects in the collection set
+  // are marked after they have been evacuated.
 
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
@@ -4366,7 +4282,8 @@
   }
 }
 
-oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
+oop G1ParCopyHelper::copy_to_survivor_space(oop old, bool should_mark_root,
+                                                     bool should_mark_copy) {
   size_t    word_sz = old->size();
   HeapRegion* from_region = _g1->heap_region_containing_raw(old);
   // +1 to make the -1 indexes valid...
@@ -4386,7 +4303,7 @@
     // This will either forward-to-self, or detect that someone else has
     // installed a forwarding pointer.
     OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
-    return _g1->handle_evacuation_failure_par(cl, old);
+    return _g1->handle_evacuation_failure_par(cl, old, should_mark_root);
   }
 
   // We're going to allocate linearly, so might as well prefetch ahead.
@@ -4422,8 +4339,8 @@
       obj->set_mark(m);
     }
 
-    // preserve "next" mark bit
-    if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) {
+    // Mark the evacuated object or propagate "next" mark bit
+    if (should_mark_copy) {
       if (!use_local_bitmaps ||
           !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) {
         // if we couldn't mark it on the local bitmap (this happens when
@@ -4431,11 +4348,12 @@
         // the bullet and do the standard parallel mark
         _cm->markAndGrayObjectIfNecessary(obj);
       }
-#if 1
+
       if (_g1->isMarkedNext(old)) {
+        // Unmark the object's old location so that marking
+        // doesn't think the old object is alive.
         _cm->nextMarkBitMap()->parClear((HeapWord*)old);
       }
-#endif
     }
 
     size_t* surv_young_words = _par_scan_state->surviving_young_words();
@@ -4458,30 +4376,78 @@
   return obj;
 }
 
-template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee>
+template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
 template <class T>
-void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee>
+void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
 ::do_oop_work(T* p) {
   oop obj = oopDesc::load_decode_heap_oop(p);
   assert(barrier != G1BarrierRS || obj != NULL,
          "Precondition: G1BarrierRS implies obj is nonNull");
 
+  // Marking:
+  // If the object is in the collection set, then the thread
+  // that copies the object should mark, or propagate the
+  // mark to, the evacuated object.
+  // If the object is not in the collection set then whether we
+  // call the mark_object() method depends on the value of the
+  // template parameter do_mark_object (which will be true for
+  // root scanning closures during an initial mark pause).
+  // The mark_object() method first checks whether the object
+  // is marked and, if not, attempts to mark the object.
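+  //
+  // In summary (a sketch of the decisions taken below, not extra logic):
+  //
+  //   obj in CSet, already forwarded:
+  //       update *p to the forwardee; if it was self-forwarded (evacuation
+  //       failure) and do_mark_object, also mark_object(p)
+  //   obj in CSet, not yet forwarded:
+  //       copy_to_survivor_space(obj,
+  //           should_mark_root = do_mark_object,
+  //           should_mark_copy = do_mark_object || _during_initial_mark ||
+  //                              (_mark_in_progress && !is_obj_ill(obj)))
+  //       then update *p to the copy
+  //   obj not in CSet:
+  //       mark_object(p) if do_mark_object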
+
   // here the null check is implicit in the cset_fast_test() test
   if (_g1->in_cset_fast_test(obj)) {
-#if G1_REM_SET_LOGGING
-    gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" "
-                           "into CS.", p, (void*) obj);
-#endif
     if (obj->is_forwarded()) {
       oopDesc::encode_store_heap_oop(p, obj->forwardee());
+      // If we are a root scanning closure during an initial
+      // mark pause (i.e. do_mark_object will be true) then
+      // we also need to handle marking of roots in the
+      // event of an evacuation failure. In the event of an
+      // evacuation failure, the object is forwarded to itself
+      // and not copied. For root-scanning closures, the
+      // object would be marked after a successful self-forward
+      // but an object could be pointed to by both a root and a
+      // non-root location and be self-forwarded by a non-root-scanning
+      // closure. Therefore we also have to attempt to mark the
+      // self-forwarded root object here.
+      if (do_mark_object && obj->forwardee() == obj) {
+        mark_object(p);
+      }
     } else {
-      oop copy_oop = copy_to_survivor_space(obj);
+      // During an initial mark pause, objects that are pointed to
+      // by the roots need to be marked - even in the event of an
+      // evacuation failure. We pass the template parameter
+      // do_mark_object (which is true for root scanning closures
+      // during an initial mark pause) to copy_to_survivor_space
+      // which will pass it on to the evacuation failure handling
+      // code. The thread that successfully self-forwards a root
+      // object is responsible for marking it.
+      bool should_mark_root = do_mark_object;
+
+      // We need to mark the copied object if we're a root scanning
+      // closure during an initial mark pause (i.e. do_mark_object
+      // will be true), or the object is already marked and we need
+      // to propagate the mark to the evacuated copy.
+      bool should_mark_copy = do_mark_object ||
+                              _during_initial_mark ||
+                              (_mark_in_progress && !_g1->is_obj_ill(obj));
+
+      oop copy_oop = copy_to_survivor_space(obj, should_mark_root,
+                                                 should_mark_copy);
       oopDesc::encode_store_heap_oop(p, copy_oop);
     }
     // When scanning the RS, we only care about objs in CS.
     if (barrier == G1BarrierRS) {
       _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
     }
+  } else {
+    // The object is not in collection set. If we're a root scanning
+    // closure during an initial mark pause (i.e. do_mark_object will
+    // be true) then attempt to mark the object.
+    if (do_mark_object) {
+      mark_object(p);
+    }
   }
 
   if (barrier == G1BarrierEvac && obj != NULL) {
@@ -4626,35 +4592,34 @@
     ResourceMark rm;
     HandleMark   hm;
 
+    ReferenceProcessor*             rp = _g1h->ref_processor_stw();
+
     G1ParScanThreadState            pss(_g1h, i);
-    G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss);
-    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss);
-    G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss);
+    G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, rp);
+    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
+    G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, rp);
 
     pss.set_evac_closure(&scan_evac_cl);
     pss.set_evac_failure_closure(&evac_failure_cl);
     pss.set_partial_scan_closure(&partial_scan_cl);
 
-    G1ParScanExtRootClosure         only_scan_root_cl(_g1h, &pss);
-    G1ParScanPermClosure            only_scan_perm_cl(_g1h, &pss);
-    G1ParScanHeapRSClosure          only_scan_heap_rs_cl(_g1h, &pss);
-    G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
-
-    G1ParScanAndMarkExtRootClosure  scan_mark_root_cl(_g1h, &pss);
-    G1ParScanAndMarkPermClosure     scan_mark_perm_cl(_g1h, &pss);
-    G1ParScanAndMarkHeapRSClosure   scan_mark_heap_rs_cl(_g1h, &pss);
-
-    OopsInHeapRegionClosure        *scan_root_cl;
-    OopsInHeapRegionClosure        *scan_perm_cl;
+    G1ParScanExtRootClosure        only_scan_root_cl(_g1h, &pss, rp);
+    G1ParScanPermClosure           only_scan_perm_cl(_g1h, &pss, rp);
+
+    G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
+    G1ParScanAndMarkPermClosure    scan_mark_perm_cl(_g1h, &pss, rp);
+
+    OopClosure*                    scan_root_cl = &only_scan_root_cl;
+    OopsInHeapRegionClosure*       scan_perm_cl = &only_scan_perm_cl;
 
     if (_g1h->g1_policy()->during_initial_mark_pause()) {
+      // We also need to mark copied objects.
       scan_root_cl = &scan_mark_root_cl;
       scan_perm_cl = &scan_mark_perm_cl;
-    } else {
-      scan_root_cl = &only_scan_root_cl;
-      scan_perm_cl = &only_scan_perm_cl;
     }
 
+    G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
+
     pss.start_strong_roots();
     _g1h->g1_process_strong_roots(/* not collecting perm */ false,
                                   SharedHeap::SO_AllClasses,
@@ -4663,6 +4628,7 @@
                                   scan_perm_cl,
                                   i);
     pss.end_strong_roots();
+
     {
       double start = os::elapsedTime();
       G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
@@ -4701,6 +4667,7 @@
                         OopsInHeapRegionClosure* scan_rs,
                         OopsInGenClosure* scan_perm,
                         int worker_i) {
+
   // First scan the strong roots, including the perm gen.
   double ext_roots_start = os::elapsedTime();
   double closure_app_time_sec = 0.0;
@@ -4719,17 +4686,29 @@
                        &eager_scan_code_roots,
                        &buf_scan_perm);
 
-  // Finish up any enqueued closure apps.
+  // Now the CM ref_processor roots.
+  if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
+    // We need to treat the discovered reference lists of the
+    // concurrent mark ref processor as roots and keep the entries
+    // on them (which are added by the marking threads) live
+    // until they can be processed at the end of marking.
+    ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
+  }
+
+  // Finish up any enqueued closure apps (attributed as object copy time).
   buf_scan_non_heap_roots.done();
   buf_scan_perm.done();
+
   double ext_roots_end = os::elapsedTime();
+
   g1_policy()->reset_obj_copy_time(worker_i);
-  double obj_copy_time_sec =
-    buf_scan_non_heap_roots.closure_app_seconds() +
-    buf_scan_perm.closure_app_seconds();
+  double obj_copy_time_sec = buf_scan_perm.closure_app_seconds() +
+                                buf_scan_non_heap_roots.closure_app_seconds();
   g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
+
   double ext_root_time_ms =
     ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0;
+
   g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
 
   // Scan strong roots in mark stack.
@@ -4739,21 +4718,11 @@
   double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0;
   g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms);
 
-  // XXX What should this be doing in the parallel case?
-  g1_policy()->record_collection_pause_end_CH_strong_roots();
   // Now scan the complement of the collection set.
   if (scan_rs != NULL) {
     g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
   }
-  // Finish with the ref_processor roots.
-  if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
-    // We need to treat the discovered reference lists as roots and
-    // keep entries (which are added by the marking threads) on them
-    // live until they can be processed at the end of marking.
-    ref_processor()->weak_oops_do(scan_non_heap_roots);
-    ref_processor()->oops_do(scan_non_heap_roots);
-  }
-  g1_policy()->record_collection_pause_end_G1_strong_roots();
+
   _process_strong_tasks->all_tasks_completed();
 }
 
@@ -4764,22 +4733,522 @@
   SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
 }
 
-
-class SaveMarksClosure: public HeapRegionClosure {
+// Weak Reference Processing support
+
+// An always "is_alive" closure that is used to preserve referents.
+// If the object is non-null then it's alive.  Used in the preservation
+// of referent objects that are pointed to by reference objects
+// discovered by the CM ref processor.
+class G1AlwaysAliveClosure: public BoolObjectClosure {
+  G1CollectedHeap* _g1;
 public:
-  bool doHeapRegion(HeapRegion* r) {
-    r->save_marks();
+  G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
+  void do_object(oop p) { assert(false, "Do not call."); }
+  bool do_object_b(oop p) {
+    if (p != NULL) {
+      return true;
+    }
     return false;
   }
 };
 
-void G1CollectedHeap::save_marks() {
-  if (!CollectedHeap::use_parallel_gc_threads()) {
-    SaveMarksClosure sm;
-    heap_region_iterate(&sm);
-  }
-  // We do this even in the parallel case
-  perm_gen()->save_marks();
+bool G1STWIsAliveClosure::do_object_b(oop p) {
+  // An object is reachable if it is outside the collection set,
+  // or is inside and has been forwarded (copied, or self-forwarded
+  // after an evacuation failure).
+  return !_g1->obj_in_cs(p) || p->is_forwarded();
+}
+
+// Non Copying Keep Alive closure
+class G1KeepAliveClosure: public OopClosure {
+  G1CollectedHeap* _g1;
+public:
+  G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
+  void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
+  void do_oop(      oop* p) {
+    oop obj = *p;
+
+    if (_g1->obj_in_cs(obj)) {
+      assert( obj->is_forwarded(), "invariant" );
+      *p = obj->forwardee();
+    }
+  }
+};
+
+// Copying Keep Alive closure - can be called from both
+// serial and parallel code as long as different worker
+// threads utilize different G1ParScanThreadState instances
+// and different queues.
+
+class G1CopyingKeepAliveClosure: public OopClosure {
+  G1CollectedHeap*         _g1h;
+  OopClosure*              _copy_non_heap_obj_cl;
+  OopsInHeapRegionClosure* _copy_perm_obj_cl;
+  G1ParScanThreadState*    _par_scan_state;
+
+public:
+  G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
+                            OopClosure* non_heap_obj_cl,
+                            OopsInHeapRegionClosure* perm_obj_cl,
+                            G1ParScanThreadState* pss):
+    _g1h(g1h),
+    _copy_non_heap_obj_cl(non_heap_obj_cl),
+    _copy_perm_obj_cl(perm_obj_cl),
+    _par_scan_state(pss)
+  {}
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(      oop* p) { do_oop_work(p); }
+
+  template <class T> void do_oop_work(T* p) {
+    oop obj = oopDesc::load_decode_heap_oop(p);
+
+    if (_g1h->obj_in_cs(obj)) {
+      // If the referent object has been forwarded (either copied
+      // to a new location or to itself in the event of an
+      // evacuation failure) then we need to update the reference
+      // field and, if both reference and referent are in the G1
+      // heap, update the RSet for the referent.
+      //
+      // If the referent has not been forwarded then we have to keep
+      // it alive by policy. Therefore we have to copy the referent.
+      //
+      // If the reference field is in the G1 heap then we can push
+      // it on the PSS queue. When the queue is drained (after each
+      // phase of reference processing) the object and its followers
+      // will be copied, the reference field set to point to the
+      // new location, and the RSet updated. Otherwise we need to
+      // use the non-heap or perm closures directly to copy
+      // the referent object and update the pointer, while avoiding
+      // updating the RSet.
+
+      if (_g1h->is_in_g1_reserved(p)) {
+        _par_scan_state->push_on_queue(p);
+      } else {
+        // The reference field is not in the G1 heap.
+        if (_g1h->perm_gen()->is_in(p)) {
+          _copy_perm_obj_cl->do_oop(p);
+        } else {
+          _copy_non_heap_obj_cl->do_oop(p);
+        }
+      }
+    }
+  }
+};
+
+// Serial drain queue closure. Called as the 'complete_gc'
+// closure for each discovered list in some of the
+// reference processing phases.
+
+class G1STWDrainQueueClosure: public VoidClosure {
+protected:
+  G1CollectedHeap* _g1h;
+  G1ParScanThreadState* _par_scan_state;
+
+  G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
+
+public:
+  G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
+    _g1h(g1h),
+    _par_scan_state(pss)
+  { }
+
+  void do_void() {
+    G1ParScanThreadState* const pss = par_scan_state();
+    pss->trim_queue();
+  }
+};
+
+// Parallel Reference Processing closures
+
+// Implementation of AbstractRefProcTaskExecutor for parallel reference
+// processing during G1 evacuation pauses.
+
+class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
+private:
+  G1CollectedHeap*   _g1h;
+  RefToScanQueueSet* _queues;
+  WorkGang*          _workers;
+  int                _active_workers;
+
+public:
+  G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
+                        WorkGang* workers,
+                        RefToScanQueueSet *task_queues,
+                        int n_workers) :
+    _g1h(g1h),
+    _queues(task_queues),
+    _workers(workers),
+    _active_workers(n_workers)
+  {
+    assert(n_workers > 0, "shouldn't call this otherwise");
+  }
+
+  // Executes the given task using the parallel GC worker threads.
+  virtual void execute(ProcessTask& task);
+  virtual void execute(EnqueueTask& task);
+};
+
+// Gang task for possibly parallel reference processing
+
+class G1STWRefProcTaskProxy: public AbstractGangTask {
+  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
+  ProcessTask&     _proc_task;
+  G1CollectedHeap* _g1h;
+  RefToScanQueueSet *_task_queues;
+  ParallelTaskTerminator* _terminator;
+
+public:
+  G1STWRefProcTaskProxy(ProcessTask& proc_task,
+                     G1CollectedHeap* g1h,
+                     RefToScanQueueSet *task_queues,
+                     ParallelTaskTerminator* terminator) :
+    AbstractGangTask("Process reference objects in parallel"),
+    _proc_task(proc_task),
+    _g1h(g1h),
+    _task_queues(task_queues),
+    _terminator(terminator)
+  {}
+
+  virtual void work(int i) {
+    // The reference processing task executed by a single worker.
+    ResourceMark rm;
+    HandleMark   hm;
+
+    G1STWIsAliveClosure is_alive(_g1h);
+
+    G1ParScanThreadState pss(_g1h, i);
+
+    G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
+    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
+    G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
+
+    pss.set_evac_closure(&scan_evac_cl);
+    pss.set_evac_failure_closure(&evac_failure_cl);
+    pss.set_partial_scan_closure(&partial_scan_cl);
+
+    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
+    G1ParScanPermClosure           only_copy_perm_cl(_g1h, &pss, NULL);
+
+    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
+    G1ParScanAndMarkPermClosure    copy_mark_perm_cl(_g1h, &pss, NULL);
+
+    OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
+    OopsInHeapRegionClosure*       copy_perm_cl = &only_copy_perm_cl;
+
+    if (_g1h->g1_policy()->during_initial_mark_pause()) {
+      // We also need to mark copied objects.
+      copy_non_heap_cl = &copy_mark_non_heap_cl;
+      copy_perm_cl = &copy_mark_perm_cl;
+    }
+
+    // Keep alive closure.
+    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_perm_cl, &pss);
+
+    // Complete GC closure
+    G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
+
+    // Call the reference processing task's work routine.
+    _proc_task.work(i, is_alive, keep_alive, drain_queue);
+
+    // Note we cannot assert that the refs array is empty here as not all
+    // of the processing tasks (specifically phase2 - pp2_work) execute
+    // the complete_gc closure (which ordinarily would drain the queue), so
+    // the queue may not be empty.
+  }
+};
+
+// Driver routine for parallel reference processing.
+// Creates an instance of the ref processing gang
+// task and has the worker threads execute it.
+void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
+  assert(_workers != NULL, "Need parallel worker threads.");
+
+  ParallelTaskTerminator terminator(_active_workers, _queues);
+  G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _queues, &terminator);
+
+  _g1h->set_par_threads(_active_workers);
+  _workers->run_task(&proc_task_proxy);
+  _g1h->set_par_threads(0);
+}
+
+// Gang task for parallel reference enqueueing.
+
+class G1STWRefEnqueueTaskProxy: public AbstractGangTask {
+  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
+  EnqueueTask& _enq_task;
+
+public:
+  G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) :
+    AbstractGangTask("Enqueue reference objects in parallel"),
+    _enq_task(enq_task)
+  { }
+
+  virtual void work(int i) {
+    _enq_task.work(i);
+  }
+};
+
+// Driver routine for parallel reference enqueueing.
+// Creates an instance of the ref enqueueing gang
+// task and has the worker threads execute it.
+
+void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
+  assert(_workers != NULL, "Need parallel worker threads.");
+
+  G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
+
+  _g1h->set_par_threads(_active_workers);
+  _workers->run_task(&enq_task_proxy);
+  _g1h->set_par_threads(0);
+}
+
+// End of weak reference support closures
+
+// Abstract task used to preserve (i.e. copy) any referent objects
+// that are in the collection set and are pointed to by reference
+// objects discovered by the CM ref processor.
+
+class G1ParPreserveCMReferentsTask: public AbstractGangTask {
+protected:
+  G1CollectedHeap* _g1h;
+  RefToScanQueueSet      *_queues;
+  ParallelTaskTerminator _terminator;
+  int _n_workers;
+
+public:
+  G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) :
+    AbstractGangTask("ParPreserveCMReferents"),
+    _g1h(g1h),
+    _queues(task_queues),
+    _terminator(workers, _queues),
+    _n_workers(workers)
+  { }
+
+  void work(int i) {
+    ResourceMark rm;
+    HandleMark   hm;
+
+    G1ParScanThreadState            pss(_g1h, i);
+    G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
+    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
+    G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
+
+    pss.set_evac_closure(&scan_evac_cl);
+    pss.set_evac_failure_closure(&evac_failure_cl);
+    pss.set_partial_scan_closure(&partial_scan_cl);
+
+    assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
+
+    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
+    G1ParScanPermClosure           only_copy_perm_cl(_g1h, &pss, NULL);
+
+    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
+    G1ParScanAndMarkPermClosure    copy_mark_perm_cl(_g1h, &pss, NULL);
+
+    OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
+    OopsInHeapRegionClosure*       copy_perm_cl = &only_copy_perm_cl;
+
+    if (_g1h->g1_policy()->during_initial_mark_pause()) {
+      // We also need to mark copied objects.
+      copy_non_heap_cl = &copy_mark_non_heap_cl;
+      copy_perm_cl = &copy_mark_perm_cl;
+    }
+
+    // Is alive closure
+    G1AlwaysAliveClosure always_alive(_g1h);
+
+    // Copying keep alive closure. Applied to referent objects that need
+    // to be copied.
+    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_perm_cl, &pss);
+
+    ReferenceProcessor* rp = _g1h->ref_processor_cm();
+
+    int limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
+    int stride = MIN2(MAX2(_n_workers, 1), limit);
+
+    // limit is set using max_num_q() - which was set using ParallelGCThreads.
+    // So this must be true - but assert just in case someone decides to
+    // change the worker ids.
+    assert(0 <= i && i < limit, "sanity");
+    assert(!rp->discovery_is_atomic(), "check this code");
+
+    // Select discovered lists [i, i+stride, i+2*stride,...,limit)
+    for (int idx = i; idx < limit; idx += stride) {
+      DiscoveredList& ref_list = rp->discovered_soft_refs()[idx];
+
+      DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
+      while (iter.has_next()) {
+        // Since discovery is not atomic for the CM ref processor, we
+        // can see some null referent objects.
+        iter.load_ptrs(DEBUG_ONLY(true));
+        oop ref = iter.obj();
+
+        // This will filter nulls.
+        if (iter.is_referent_alive()) {
+          iter.make_referent_alive();
+        }
+        iter.move_to_next();
+      }
+    }
+
+    // Drain the queue - which may cause stealing
+    G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);
+    drain_queue.do_void();
+    // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
+    assert(pss.refs()->is_empty(), "should be");
+  }
+};
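+
+// For illustration only (not part of this change): the stride-based list
+// claiming in the loop above partitions the discovered lists across the
+// workers without any synchronization. With hypothetical values
+// _n_workers = 4 and limit = 12, stride = MIN2(MAX2(4, 1), 12) = 4, so
+//
+//   worker 0 visits lists 0, 4, 8
+//   worker 1 visits lists 1, 5, 9
+//   worker 2 visits lists 2, 6, 10
+//   worker 3 visits lists 3, 7, 11
+//
+// i.e. every list in [0, limit) is visited by exactly one worker. If there
+// were more workers than lists, stride would equal limit and each worker i
+// with i < limit would visit only list i (the assert above guards i < limit).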
+
+// Weak Reference processing during an evacuation pause (part 1).
+void G1CollectedHeap::process_discovered_references() {
+  double ref_proc_start = os::elapsedTime();
+
+  ReferenceProcessor* rp = _ref_processor_stw;
+  assert(rp->discovery_enabled(), "should have been enabled");
+
+  // Any reference objects, in the collection set, that were 'discovered'
+  // by the CM ref processor should have already been copied (either by
+  // applying the external root copy closure to the discovered lists, or
+  // by following an RSet entry).
+  //
+  // But some of the referents, that are in the collection set, that these
+  // reference objects point to may not have been copied: the STW ref
+  // processor would have seen that the reference object had already
+  // been 'discovered' and would have skipped discovering the reference,
+  // but would not have treated the reference object as a regular oop.
+  // As a result the copy closure would not have been applied to the
+  // referent object.
+  //
+  // We need to explicitly copy these referent objects - the references
+  // will be processed at the end of remarking.
+  //
+  // We also need to do this copying before we process the reference
+  // objects discovered by the STW ref processor in case one of these
+  // referents points to another object which is also referenced by an
+  // object discovered by the STW ref processor.
+
+  int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
+                        workers()->total_workers() : 1);
+
+  set_par_threads(n_workers);
+  G1ParPreserveCMReferentsTask keep_cm_referents(this, n_workers, _task_queues);
+
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
+    workers()->run_task(&keep_cm_referents);
+  } else {
+    keep_cm_referents.work(0);
+  }
+
+  set_par_threads(0);
+
+  // Closure to test whether a referent is alive.
+  G1STWIsAliveClosure is_alive(this);
+
+  // Even when parallel reference processing is enabled, the processing
+  // of JNI refs is serial and is performed by the current thread
+  // rather than by a worker. The following PSS will be used for processing
+  // JNI refs.
+
+  // Use only a single queue for this PSS.
+  G1ParScanThreadState pss(this, 0);
+
+  // We do not embed a reference processor in the copying/scanning
+  // closures while we're actually processing the discovered
+  // reference objects.
+  G1ParScanHeapEvacClosure        scan_evac_cl(this, &pss, NULL);
+  G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
+  G1ParScanPartialArrayClosure    partial_scan_cl(this, &pss, NULL);
+
+  pss.set_evac_closure(&scan_evac_cl);
+  pss.set_evac_failure_closure(&evac_failure_cl);
+  pss.set_partial_scan_closure(&partial_scan_cl);
+
+  assert(pss.refs()->is_empty(), "pre-condition");
+
+  G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
+  G1ParScanPermClosure           only_copy_perm_cl(this, &pss, NULL);
+
+  G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
+  G1ParScanAndMarkPermClosure    copy_mark_perm_cl(this, &pss, NULL);
+
+  OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
+  OopsInHeapRegionClosure*       copy_perm_cl = &only_copy_perm_cl;
+
+  if (_g1h->g1_policy()->during_initial_mark_pause()) {
+    // We also need to mark copied objects.
+    copy_non_heap_cl = &copy_mark_non_heap_cl;
+    copy_perm_cl = &copy_mark_perm_cl;
+  }
+
+  // Keep alive closure.
+  G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_perm_cl, &pss);
+
+  // Serial Complete GC closure
+  G1STWDrainQueueClosure drain_queue(this, &pss);
+
+  // Setup the soft refs policy...
+  rp->setup_policy(false);
+
+  if (!rp->processing_is_mt()) {
+    // Serial reference processing...
+    rp->process_discovered_references(&is_alive,
+                                      &keep_alive,
+                                      &drain_queue,
+                                      NULL);
+  } else {
+    // Parallel reference processing
+    int active_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
+    assert(rp->num_q() == active_workers, "sanity");
+    assert(active_workers <= rp->max_num_q(), "sanity");
+
+    G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
+    rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor);
+  }
+
+  // We have completed copying any necessary live referent objects
+  // (that were not copied during the actual pause) so we can
+  // retire any active alloc buffers
+  pss.retire_alloc_buffers();
+  assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
+
+  double ref_proc_time = os::elapsedTime() - ref_proc_start;
+  g1_policy()->record_ref_proc_time(ref_proc_time * 1000.0);
+}
+
+// Weak Reference processing during an evacuation pause (part 2).
+void G1CollectedHeap::enqueue_discovered_references() {
+  double ref_enq_start = os::elapsedTime();
+
+  ReferenceProcessor* rp = _ref_processor_stw;
+  assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
+
+  // Now enqueue any references remaining on the discovered
+  // lists onto the pending list.
+  if (!rp->processing_is_mt()) {
+    // Serial reference processing...
+    rp->enqueue_discovered_references();
+  } else {
+    // Parallel reference enqueuing
+
+    int active_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
+    assert(rp->num_q() == active_workers, "sanity");
+    assert(active_workers <= rp->max_num_q(), "sanity");
+
+    G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
+    rp->enqueue_discovered_references(&par_task_executor);
+  }
+
+  rp->verify_no_references_recorded();
+  assert(!rp->discovery_enabled(), "should have been disabled");
+
+  // FIXME
+  // CM's reference processing also cleans up the string and symbol tables.
+  // Should we do that here also? We could, but it is a serial operation
+  // and could significantly increase the pause time.
+
+  double ref_enq_time = os::elapsedTime() - ref_enq_start;
+  g1_policy()->record_ref_enq_time(ref_enq_time * 1000.0);
 }
 
 void G1CollectedHeap::evacuate_collection_set() {
@@ -4799,6 +5268,7 @@
 
   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
   double start_par = os::elapsedTime();
+
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     // The individual threads will set their evac-failure closures.
     StrongRootsScope srs(this);
@@ -4812,21 +5282,25 @@
   double par_time = (os::elapsedTime() - start_par) * 1000.0;
   g1_policy()->record_par_time(par_time);
   set_par_threads(0);
-  // Is this the right thing to do here?  We don't save marks
-  // on individual heap regions when we allocate from
-  // them in parallel, so this seems like the correct place for this.
-  retire_all_alloc_regions();
+
+  // Process any discovered reference objects - we have
+  // to do this _before_ we retire the GC alloc regions
+  // as we may have to copy some 'reachable' referent
+  // objects (and their reachable sub-graphs) that were
+  // not copied during the pause.
+  process_discovered_references();
 
   // Weak root processing.
   // Note: when JSR 292 is enabled and code blobs can contain
   // non-perm oops then we will need to process the code blobs
   // here too.
   {
-    G1IsAliveClosure is_alive(this);
+    G1STWIsAliveClosure is_alive(this);
     G1KeepAliveClosure keep_alive(this);
     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
   }
-  release_gc_alloc_regions(false /* totally */);
+
+  release_gc_alloc_regions();
   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
 
   concurrent_g1_refine()->clear_hot_cache();
@@ -4847,6 +5321,15 @@
     }
   }
 
+  // Enqueue any references remaining on the STW
+  // reference processor's discovered lists. We need to do
+  // this after the card table is cleaned (and verified) as
+  // the act of enqueuing entries on to the pending list
+  // will log these updates (and dirty their associated
+  // cards). We need these updates logged to update any
+  // RSets.
+  enqueue_discovered_references();
+
   if (G1DeferredRSUpdate) {
     RedirtyLoggedCardTableEntryFastClosure redirty;
     dirty_card_queue_set().set_closure(&redirty);
@@ -4906,10 +5389,10 @@
   hr->set_notHumongous();
   free_region(hr, &hr_pre_used, free_list, par);
 
-  int i = hr->hrs_index() + 1;
+  size_t i = hr->hrs_index() + 1;
   size_t num = 1;
-  while ((size_t) i < n_regions()) {
-    HeapRegion* curr_hr = _hrs->at(i);
+  while (i < n_regions()) {
+    HeapRegion* curr_hr = region_at(i);
     if (!curr_hr->continuesHumongous()) {
       break;
     }
@@ -4947,68 +5430,31 @@
   }
 }
 
-void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) {
-  while (list != NULL) {
-    guarantee( list->is_young(), "invariant" );
-
-    HeapWord* bottom = list->bottom();
-    HeapWord* end = list->end();
-    MemRegion mr(bottom, end);
-    ct_bs->dirty(mr);
-
-    list = list->get_next_young_region();
-  }
-}
-
-
 class G1ParCleanupCTTask : public AbstractGangTask {
   CardTableModRefBS* _ct_bs;
   G1CollectedHeap* _g1h;
   HeapRegion* volatile _su_head;
 public:
   G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
-                     G1CollectedHeap* g1h,
-                     HeapRegion* survivor_list) :
+                     G1CollectedHeap* g1h) :
     AbstractGangTask("G1 Par Cleanup CT Task"),
-    _ct_bs(ct_bs),
-    _g1h(g1h),
-    _su_head(survivor_list)
-  { }
+    _ct_bs(ct_bs), _g1h(g1h) { }
 
   void work(int i) {
     HeapRegion* r;
     while (r = _g1h->pop_dirty_cards_region()) {
       clear_cards(r);
     }
-    // Redirty the cards of the survivor regions.
-    dirty_list(&this->_su_head);
   }
 
   void clear_cards(HeapRegion* r) {
-    // Cards for Survivor regions will be dirtied later.
+    // Cards of the survivors should have already been dirtied.
     if (!r->is_survivor()) {
       _ct_bs->clear(MemRegion(r->bottom(), r->end()));
     }
   }
-
-  void dirty_list(HeapRegion* volatile * head_ptr) {
-    HeapRegion* head;
-    do {
-      // Pop region off the list.
-      head = *head_ptr;
-      if (head != NULL) {
-        HeapRegion* r = (HeapRegion*)
-          Atomic::cmpxchg_ptr(head->get_next_young_region(), head_ptr, head);
-        if (r == head) {
-          assert(!r->isHumongous(), "Humongous regions shouldn't be on survivor list");
-          _ct_bs->dirty(MemRegion(r->bottom(), r->end()));
-        }
-      }
-    } while (*head_ptr != NULL);
-  }
 };
 
-
 #ifndef PRODUCT
 class G1VerifyCardTableCleanup: public HeapRegionClosure {
   G1CollectedHeap* _g1h;
@@ -5064,8 +5510,7 @@
   double start = os::elapsedTime();
 
   // Iterate over the dirty cards region list.
-  G1ParCleanupCTTask cleanup_task(ct_bs, this,
-                                  _young_list->first_survivor_region());
+  G1ParCleanupCTTask cleanup_task(ct_bs, this);
 
   if (ParallelGCThreads > 0) {
     set_par_threads(workers()->total_workers());
@@ -5082,14 +5527,10 @@
       }
       r->set_next_dirty_cards_region(NULL);
     }
-    // now, redirty the cards of the survivor regions
-    // (it seemed faster to do it this way, instead of iterating over
-    // all regions and then clearing / dirtying as appropriate)
-    dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());
   }
 
   double elapsed = os::elapsedTime() - start;
-  g1_policy()->record_clear_ct_time( elapsed * 1000.0);
+  g1_policy()->record_clear_ct_time(elapsed * 1000.0);
 #ifndef PRODUCT
   if (G1VerifyCTCleanup || VerifyAfterGC) {
     G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
@@ -5269,16 +5710,6 @@
   }
 }
 
-size_t G1CollectedHeap::n_regions() {
-  return _hrs->length();
-}
-
-size_t G1CollectedHeap::max_regions() {
-  return
-    (size_t)align_size_up(max_capacity(), HeapRegion::GrainBytes) /
-    HeapRegion::GrainBytes;
-}
-
 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
   assert(heap_lock_held_for_gc(),
               "the heap lock should already be held by or for this thread");
@@ -5317,39 +5748,10 @@
 void G1CollectedHeap::empty_young_list() {
   assert(heap_lock_held_for_gc(),
               "the heap lock should already be held by or for this thread");
-  assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode");
 
   _young_list->empty_list();
 }
 
-bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() {
-  bool no_allocs = true;
-  for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) {
-    HeapRegion* r = _gc_alloc_regions[ap];
-    no_allocs = r == NULL || r->saved_mark_at_top();
-  }
-  return no_allocs;
-}
-
-void G1CollectedHeap::retire_all_alloc_regions() {
-  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
-    HeapRegion* r = _gc_alloc_regions[ap];
-    if (r != NULL) {
-      // Check for aliases.
-      bool has_processed_alias = false;
-      for (int i = 0; i < ap; ++i) {
-        if (_gc_alloc_regions[i] == r) {
-          has_processed_alias = true;
-          break;
-        }
-      }
-      if (!has_processed_alias) {
-        retire_alloc_region(r, false /* par */);
-      }
-    }
-  }
-}
-
 // Done at the start of full GC.
 void G1CollectedHeap::tear_down_region_lists() {
   _free_list.remove_all();
@@ -5404,18 +5806,21 @@
   }
 }
 
+// Methods for the mutator alloc region
+
 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
                                                       bool force) {
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   assert(!force || g1_policy()->can_expand_young_list(),
          "if force is true we should be able to expand the young list");
-  if (force || !g1_policy()->is_young_list_full()) {
+  bool young_list_full = g1_policy()->is_young_list_full();
+  if (force || !young_list_full) {
     HeapRegion* new_alloc_region = new_region(word_size,
                                               false /* do_expand */);
     if (new_alloc_region != NULL) {
       g1_policy()->update_region_num(true /* next_is_young */);
       set_region_short_lived_locked(new_alloc_region);
-      g1mm()->update_eden_counters();
+      _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
       return new_alloc_region;
     }
   }
@@ -5429,6 +5834,11 @@
 
   g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
   _summary_bytes_used += allocated_bytes;
+  _hr_printer.retire(alloc_region);
+  // We update the eden sizes here, when the region is retired,
+  // instead of when it's allocated, since this is the point that its
+  // used space has been recorded in _summary_bytes_used.
+  g1mm()->update_eden_size();
 }
 
 HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
@@ -5441,6 +5851,69 @@
   _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
 }
 
+// Methods for the GC alloc regions
+
+HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
+                                                 size_t count,
+                                                 GCAllocPurpose ap) {
+  assert(FreeList_lock->owned_by_self(), "pre-condition");
+
+  if (count < g1_policy()->max_regions(ap)) {
+    HeapRegion* new_alloc_region = new_region(word_size,
+                                              true /* do_expand */);
+    if (new_alloc_region != NULL) {
+      // We really only need to do this for old regions given that we
+      // should never scan survivors. But it doesn't hurt to do it
+      // for survivors too.
+      new_alloc_region->set_saved_mark();
+      if (ap == GCAllocForSurvived) {
+        new_alloc_region->set_survivor();
+        _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
+      } else {
+        _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
+      }
+      return new_alloc_region;
+    } else {
+      g1_policy()->note_alloc_region_limit_reached(ap);
+    }
+  }
+  return NULL;
+}
+
+void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
+                                             size_t allocated_bytes,
+                                             GCAllocPurpose ap) {
+  alloc_region->note_end_of_copying();
+  g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
+  if (ap == GCAllocForSurvived) {
+    young_list()->add_survivor_region(alloc_region);
+  }
+  _hr_printer.retire(alloc_region);
+}
+
+HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
+                                                       bool force) {
+  assert(!force, "not supported for GC alloc regions");
+  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
+}
+
+void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
+                                          size_t allocated_bytes) {
+  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
+                               GCAllocForSurvived);
+}
+
+HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
+                                                  bool force) {
+  assert(!force, "not supported for GC alloc regions");
+  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
+}
+
+void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
+                                     size_t allocated_bytes) {
+  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
+                               GCAllocForTenured);
+}
 // Heap region set verification
 
 class VerifyRegionListsClosure : public HeapRegionClosure {
@@ -5475,6 +5948,15 @@
   }
 };
 
+HeapRegion* G1CollectedHeap::new_heap_region(size_t hrs_index,
+                                             HeapWord* bottom) {
+  HeapWord* end = bottom + HeapRegion::GrainWords;
+  MemRegion mr(bottom, end);
+  assert(_g1_reserved.contains(mr), "invariant");
+  // This might return NULL if the allocation fails
+  return new HeapRegion(hrs_index, _bot_shared, mr, true /* is_zeroed */);
+}
+
 void G1CollectedHeap::verify_region_sets() {
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -27,8 +27,10 @@
 
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/g1AllocRegion.hpp"
+#include "gc_implementation/g1/g1HRPrinter.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
+#include "gc_implementation/g1/heapRegionSeq.hpp"
 #include "gc_implementation/g1/heapRegionSets.hpp"
 #include "gc_implementation/shared/hSpaceCounters.hpp"
 #include "gc_implementation/parNew/parGCAllocBuffer.hpp"
@@ -42,7 +44,6 @@
 // heap subsets that will yield large amounts of garbage.
 
 class HeapRegion;
-class HeapRegionSeq;
 class HRRSCleanupTask;
 class PermanentGenerationSpec;
 class GenerationSpec;
@@ -103,6 +104,19 @@
   size_t       length() { return _length; }
   size_t       survivor_length() { return _survivor_length; }
 
+  // Currently we do not keep track of the used byte sum for the
+  // young list and the survivors, and it'd be quite a lot of work to
+  // do so. When we eventually replace the young list with
+  // instances of HeapRegionLinkedList we'll get that for free. So,
+  // we'll report the more accurate information then.
+  size_t       eden_used_bytes() {
+    assert(length() >= survivor_length(), "invariant");
+    return (length() - survivor_length()) * HeapRegion::GrainBytes;
+  }
+  size_t       survivor_used_bytes() {
+    return survivor_length() * HeapRegion::GrainBytes;
+  }
+
   void rs_length_sampling_init();
   bool rs_length_sampling_more();
   void rs_length_sampling_next();
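
For a concrete sense of the numbers reported here: with a young list of 30 regions of which 5 are survivors, and taking HeapRegion::GrainBytes as 1 MB purely for illustration, eden_used_bytes() returns (30 - 5) * 1 MB = 25 MB and survivor_used_bytes() returns 5 * 1 MB = 5 MB.
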
@@ -141,7 +155,39 @@
     : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
 };
 
+// The G1 STW is alive closure.
+// An instance is embedded into the G1CH and used as the
+// (optional) _is_alive_non_header closure in the STW
+// reference processor. It is also extensively used during
+// reference processing during STW evacuation pauses.
+class G1STWIsAliveClosure: public BoolObjectClosure {
+  G1CollectedHeap* _g1;
+public:
+  G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
+  void do_object(oop p) { assert(false, "Do not call."); }
+  bool do_object_b(oop p);
+};
+
+class SurvivorGCAllocRegion : public G1AllocRegion {
+protected:
+  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+  SurvivorGCAllocRegion()
+  : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
+};
+
+class OldGCAllocRegion : public G1AllocRegion {
+protected:
+  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+  OldGCAllocRegion()
+  : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
+};
+
 class RefineCardTableEntryClosure;
+
 class G1CollectedHeap : public SharedHeap {
   friend class VM_G1CollectForAllocation;
   friend class VM_GenCollectForPermanentAllocation;
@@ -149,6 +195,8 @@
   friend class VM_G1IncCollectionPause;
   friend class VMStructs;
   friend class MutatorAllocRegion;
+  friend class SurvivorGCAllocRegion;
+  friend class OldGCAllocRegion;
 
   // Closures used in implementation.
   friend class G1ParCopyHelper;
@@ -183,9 +231,6 @@
   // The part of _g1_storage that is currently committed.
   MemRegion _g1_committed;
 
-  // The maximum part of _g1_storage that has ever been committed.
-  MemRegion _g1_max_committed;
-
   // The master free list. It will satisfy all new region allocations.
   MasterFreeRegionList      _free_list;
 
@@ -209,35 +254,38 @@
   void rebuild_region_lists();
 
   // The sequence of all heap regions in the heap.
-  HeapRegionSeq* _hrs;
+  HeapRegionSeq _hrs;
 
   // Alloc region used to satisfy mutator allocation requests.
   MutatorAllocRegion _mutator_alloc_region;
 
+  // Alloc region used to satisfy allocation requests by the GC for
+  // survivor objects.
+  SurvivorGCAllocRegion _survivor_gc_alloc_region;
+
+  // Alloc region used to satisfy allocation requests by the GC for
+  // old objects.
+  OldGCAllocRegion _old_gc_alloc_region;
+
+  // The last old region we allocated to during the last GC.
+  // Typically, it is not full so we should re-use it during the next GC.
+  HeapRegion* _retained_old_gc_alloc_region;
+
   // It resets the mutator alloc region before new allocations can take place.
   void init_mutator_alloc_region();
 
   // It releases the mutator alloc region.
   void release_mutator_alloc_region();
 
-  void abandon_gc_alloc_regions();
+  // It initializes the GC alloc regions at the start of a GC.
+  void init_gc_alloc_regions();
 
-  // The to-space memory regions into which objects are being copied during
-  // a GC.
-  HeapRegion* _gc_alloc_regions[GCAllocPurposeCount];
-  size_t _gc_alloc_region_counts[GCAllocPurposeCount];
-  // These are the regions, one per GCAllocPurpose, that are half-full
-  // at the end of a collection and that we want to reuse during the
-  // next collection.
-  HeapRegion* _retained_gc_alloc_regions[GCAllocPurposeCount];
-  // This specifies whether we will keep the last half-full region at
-  // the end of a collection so that it can be reused during the next
-  // collection (this is specified per GCAllocPurpose)
-  bool _retain_gc_alloc_region[GCAllocPurposeCount];
+  // It releases the GC alloc regions at the end of a GC.
+  void release_gc_alloc_regions();
 
-  // A list of the regions that have been set to be alloc regions in the
-  // current collection.
-  HeapRegion* _gc_alloc_region_list;
+  // It does any cleanup that needs to be done on the GC alloc regions
+  // before a Full GC.
+  void abandon_gc_alloc_regions();
 
   // Helper for monitoring and management support.
   G1MonitoringSupport* _g1mm;
@@ -245,20 +293,6 @@
   // Determines PLAB size for a particular allocation purpose.
   static size_t desired_plab_sz(GCAllocPurpose purpose);
 
-  // When called by par thread, requires the FreeList_lock to be held.
-  void push_gc_alloc_region(HeapRegion* hr);
-
-  // This should only be called single-threaded.  Undeclares all GC alloc
-  // regions.
-  void forget_alloc_region_list();
-
-  // Should be used to set an alloc region, because there's other
-  // associated bookkeeping.
-  void set_gc_alloc_region(int purpose, HeapRegion* r);
-
-  // Check well-formedness of alloc region list.
-  bool check_gc_alloc_regions();
-
   // Outside of GC pauses, the number of bytes used in all regions other
   // than the current allocation region.
   size_t _summary_bytes_used;
@@ -288,6 +322,8 @@
 
   size_t* _surviving_young_words;
 
+  G1HRPrinter _hr_printer;
+
   void setup_surviving_young_words();
   void update_surviving_young_words(size_t* surv_young_words);
   void cleanup_surviving_young_words();
@@ -374,14 +410,7 @@
 
 protected:
 
-  // Returns "true" iff none of the gc alloc regions have any allocations
-  // since the last call to "save_marks".
-  bool all_alloc_regions_no_allocs_since_save_marks();
-  // Perform finalization stuff on all allocation regions.
-  void retire_all_alloc_regions();
-
-  // The number of regions allocated to hold humongous objects.
-  int         _num_humongous_regions;
+  // The young region list.
   YoungList*  _young_list;
 
   // The current policy object for the collector.
@@ -400,21 +429,18 @@
   // request.
   HeapRegion* new_region(size_t word_size, bool do_expand);
 
-  // Try to allocate a new region to be used for allocation by
-  // a GC thread. It will try to expand the heap if no region is
-  // available.
-  HeapRegion* new_gc_alloc_region(int purpose, size_t word_size);
-
   // Attempt to satisfy a humongous allocation request of the given
   // size by finding a contiguous set of free regions of num_regions
   // length and remove them from the master free list. Return the
-  // index of the first region or -1 if the search was unsuccessful.
-  int humongous_obj_allocate_find_first(size_t num_regions, size_t word_size);
+  // index of the first region or G1_NULL_HRS_INDEX if the search
+  // was unsuccessful.
+  size_t humongous_obj_allocate_find_first(size_t num_regions,
+                                           size_t word_size);
 
   // Initialize a contiguous set of free regions of length num_regions
   // and starting at index first so that they appear as a single
   // humongous region.
-  HeapWord* humongous_obj_allocate_initialize_regions(int first,
+  HeapWord* humongous_obj_allocate_initialize_regions(size_t first,
                                                       size_t num_regions,
                                                       size_t word_size);
 
@@ -434,8 +460,7 @@
   // * All allocation requests for new TLABs should go to
   //   allocate_new_tlab().
   //
-  // * All non-TLAB allocation requests should go to mem_allocate()
-  //   and mem_allocate() should never be called with is_tlab == true.
+  // * All non-TLAB allocation requests should go to mem_allocate().
   //
   // * If either call cannot satisfy the allocation request using the
   //   current allocating region, they will try to get a new one. If
@@ -455,8 +480,6 @@
   virtual HeapWord* allocate_new_tlab(size_t word_size);
 
   virtual HeapWord* mem_allocate(size_t word_size,
-                                 bool   is_noref,
-                                 bool   is_tlab, /* expected to be false */
                                  bool*  gc_overhead_limit_was_exceeded);
 
   // The following three methods take a gc_count_before_ret
@@ -513,16 +536,25 @@
   // that parallel threads might be attempting allocations.
   void par_allocate_remaining_space(HeapRegion* r);
 
-  // Retires an allocation region when it is full or at the end of a
-  // GC pause.
-  void  retire_alloc_region(HeapRegion* alloc_region, bool par);
+  // Allocation attempt during GC for a survivor object / PLAB.
+  inline HeapWord* survivor_attempt_allocation(size_t word_size);
 
-  // These two methods are the "callbacks" from the G1AllocRegion class.
+  // Allocation attempt during GC for an old object / PLAB.
+  inline HeapWord* old_attempt_allocation(size_t word_size);
 
+  // These methods are the "callbacks" from the G1AllocRegion class.
+
+  // For mutator alloc regions.
   HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
   void retire_mutator_alloc_region(HeapRegion* alloc_region,
                                    size_t allocated_bytes);
 
+  // For GC alloc regions.
+  HeapRegion* new_gc_alloc_region(size_t word_size, size_t count,
+                                  GCAllocPurpose ap);
+  void retire_gc_alloc_region(HeapRegion* alloc_region,
+                              size_t allocated_bytes, GCAllocPurpose ap);
+
   // - if explicit_gc is true, the GC is for a System.gc() or a heap
   //   inspection request and should collect the entire heap
   // - if clear_all_soft_refs is true, all soft references should be
@@ -555,9 +587,20 @@
   // allocated block, or else "NULL".
   HeapWord* expand_and_allocate(size_t word_size);
 
+  // Process any reference objects discovered during
+  // an incremental evacuation pause.
+  void process_discovered_references();
+
+  // Enqueue any remaining discovered references
+  // after processing.
+  void enqueue_discovered_references();
+
 public:
 
-  G1MonitoringSupport* g1mm() { return _g1mm; }
+  G1MonitoringSupport* g1mm() {
+    assert(_g1mm != NULL, "should have been initialized");
+    return _g1mm;
+  }
 
   // Expand the garbage-first heap by at least the given size (in bytes!).
   // Returns true if the heap was expanded by the requested amount;
@@ -574,8 +617,8 @@
   void register_region_with_in_cset_fast_test(HeapRegion* r) {
     assert(_in_cset_fast_test_base != NULL, "sanity");
     assert(r->in_collection_set(), "invariant");
-    int index = r->hrs_index();
-    assert(0 <= index && (size_t) index < _in_cset_fast_test_length, "invariant");
+    size_t index = r->hrs_index();
+    assert(index < _in_cset_fast_test_length, "invariant");
     assert(!_in_cset_fast_test_base[index], "invariant");
     _in_cset_fast_test_base[index] = true;
   }
@@ -626,6 +669,8 @@
     return _full_collections_completed;
   }
 
+  G1HRPrinter* hr_printer() { return &_hr_printer; }
+
 protected:
 
   // Shrink the garbage-first heap by at most the given size (in bytes!).
@@ -714,9 +759,6 @@
   void g1_process_weak_roots(OopClosure* root_closure,
                              OopClosure* non_root_closure);
 
-  // Invoke "save_marks" on all heap regions.
-  void save_marks();
-
   // Frees a non-humongous region by initializing its contents and
   // adding it to the free list that's passed as a parameter (this is
   // usually a local list which will be appended to the master free
@@ -741,6 +783,11 @@
                              HumongousRegionSet* humongous_proxy_set,
                              bool par);
 
+  // Notifies all the necessary spaces that the committed space has
+  // been updated (either expanded or shrunk). It should be called
+  // after _g1_storage is updated.
+  void update_committed_space(HeapWord* old_end, HeapWord* new_end);
+
   // The concurrent marker (and the thread it runs in.)
   ConcurrentMark* _cm;
   ConcurrentMarkThread* _cmThread;
@@ -800,36 +847,87 @@
   void finalize_for_evac_failure();
 
   // An attempt to evacuate "obj" has failed; take necessary steps.
-  oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
+  oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj,
+                                    bool should_mark_root);
   void handle_evacuation_failure_common(oop obj, markOop m);
 
+  // ("Weak") Reference processing support.
+  //
+  // G1 has 2 instances of the reference processor class. One
+  // (_ref_processor_cm) handles reference object discovery
+  // and subsequent processing during concurrent marking cycles.
+  //
+  // The other (_ref_processor_stw) handles reference object
+  // discovery and processing during full GCs and incremental
+  // evacuation pauses.
+  //
+  // During an incremental pause, reference discovery will be
+  // temporarily disabled for _ref_processor_cm and will be
+  // enabled for _ref_processor_stw. At the end of the evacuation
+  // pause references discovered by _ref_processor_stw will be
+  // processed and discovery will be disabled. The previous
+  // setting for reference object discovery for _ref_processor_cm
+  // will be re-instated.
+  //
+  // At the start of marking:
+  //  * Discovery by the CM ref processor is verified to be inactive
+  //    and its discovered lists are empty.
+  //  * Discovery by the CM ref processor is then enabled.
+  //
+  // At the end of marking:
+  //  * Any references on the CM ref processor's discovered
+  //    lists are processed (possibly MT).
+  //
+  // At the start of full GC we:
+  //  * Disable discovery by the CM ref processor and
+  //    empty CM ref processor's discovered lists
+  //    (without processing any entries).
+  //  * Verify that the STW ref processor is inactive and its
+  //    discovered lists are empty.
+  //  * Temporarily set STW ref processor discovery as single threaded.
+  //  * Temporarily clear the STW ref processor's _is_alive_non_header
+  //    field.
+  //  * Finally enable discovery by the STW ref processor.
+  //
+  // The STW ref processor is used to record any discovered
+  // references during the full GC.
+  //
+  // At the end of a full GC we:
+  //  * Enqueue any reference objects discovered by the STW ref processor
+  //    that have non-live referents. This has the side-effect of
+  //    making the STW ref processor inactive by disabling discovery.
+  //  * Verify that the CM ref processor is still inactive
+  //    and no references have been placed on its discovered
+  //    lists (also checked as a precondition during initial marking).
 
-  // Ensure that the relevant gc_alloc regions are set.
-  void get_gc_alloc_regions();
-  // We're done with GC alloc regions. We are going to tear down the
-  // gc alloc list and remove the gc alloc tag from all the regions on
-  // that list. However, we will also retain the last (i.e., the one
-  // that is half-full) GC alloc region, per GCAllocPurpose, for
-  // possible reuse during the next collection, provided
-  // _retain_gc_alloc_region[] indicates that it should be the
-  // case. Said regions are kept in the _retained_gc_alloc_regions[]
-  // array. If the parameter totally is set, we will not retain any
-  // regions, irrespective of what _retain_gc_alloc_region[]
-  // indicates.
-  void release_gc_alloc_regions(bool totally);
-#ifndef PRODUCT
-  // Useful for debugging.
-  void print_gc_alloc_regions();
-#endif // !PRODUCT
+  // The (stw) reference processor...
+  ReferenceProcessor* _ref_processor_stw;
+
+  // During reference object discovery, the _is_alive_non_header
+  // closure (if non-null) is applied to the referent object to
+  // determine whether the referent is live. If so then the
+  // reference object does not need to be 'discovered' and can
+  // be treated as a regular oop. This has the benefit of reducing
+  // the number of 'discovered' reference objects that need to
+  // be processed.
+  //
+  // Instance of the is_alive closure for embedding into the
+  // STW reference processor as the _is_alive_non_header field.
+  // Supplying a value for the _is_alive_non_header field is
+  // optional but doing so prevents unnecessary additions to
+  // the discovered lists during reference discovery.
+  G1STWIsAliveClosure _is_alive_closure_stw;
+
+  // The (concurrent marking) reference processor...
+  ReferenceProcessor* _ref_processor_cm;
 
   // Instance of the concurrent mark is_alive closure for embedding
-  // into the reference processor as the is_alive_non_header. This
-  // prevents unnecessary additions to the discovered lists during
-  // concurrent discovery.
-  G1CMIsAliveClosure _is_alive_closure;
-
-  // ("Weak") Reference processing support
-  ReferenceProcessor* _ref_processor;
+  // into the Concurrent Marking reference processor as the
+  // _is_alive_non_header field. Supplying a value for the
+  // _is_alive_non_header field is optional but doing so prevents
+  // unnecessary additions to the discovered lists during reference
+  // discovery.
+  G1CMIsAliveClosure _is_alive_closure_cm;
 
   enum G1H_process_strong_roots_tasks {
     G1H_PS_mark_stack_oops_do,
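
The discovery hand-off the comment block above describes can be condensed into a toy (the names below are illustrative only, not the HotSpot ReferenceProcessor API): concurrent-mark discovery is parked for the pause, STW discovery is enabled and drained, and the previous CM setting is then restored.

  struct ToyRefProcessor {
    bool discovery_enabled;
    ToyRefProcessor() : discovery_enabled(false) {}
  };

  static void evacuation_pause_discovery_handoff(ToyRefProcessor& cm,
                                                 ToyRefProcessor& stw) {
    const bool cm_was_enabled = cm.discovery_enabled;
    cm.discovery_enabled  = false;  // park CM discovery for the pause
    stw.discovery_enabled = true;   // STW processor discovers during evacuation

    // ... evacuate; process and enqueue the STW-discovered references ...

    stw.discovery_enabled = false;           // STW processor is inactive again
    cm.discovery_enabled  = cm_was_enabled;  // restore the previous CM setting
  }
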
@@ -870,6 +968,7 @@
   // specified by the policy object.
   jint initialize();
 
+  // Initialize weak reference processing.
   virtual void ref_processing_init();
 
   void set_par_threads(int t) {
@@ -921,8 +1020,13 @@
   // The shared block offset table array.
   G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }
 
-  // Reference Processing accessor
-  ReferenceProcessor* ref_processor() { return _ref_processor; }
+  // Reference Processing accessors
+
+  // The STW reference processor....
+  ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
+
+  // The Concurrent Marking reference processor...
+  ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
 
   virtual size_t capacity() const;
   virtual size_t used() const;
@@ -930,9 +1034,6 @@
   // result might be a bit inaccurate.
   size_t used_unlocked() const;
   size_t recalculate_used() const;
-#ifndef PRODUCT
-  size_t recalculate_used_regions() const;
-#endif // PRODUCT
 
   // These virtual functions do the actual allocation.
   // Some heaps may offer a contiguous region for shared non-blocking
@@ -954,15 +1055,13 @@
   }
 
   // The total number of regions in the heap.
-  size_t n_regions();
+  size_t n_regions() { return _hrs.length(); }
+
+  // The max number of regions in the heap.
+  size_t max_regions() { return _hrs.max_length(); }
 
   // The number of regions that are completely free.
-  size_t max_regions();
-
-  // The number of regions that are completely free.
-  size_t free_regions() {
-    return _free_list.length();
-  }
+  size_t free_regions() { return _free_list.length(); }
 
   // The number of regions that are not completely free.
   size_t used_regions() { return n_regions() - free_regions(); }
@@ -970,6 +1069,10 @@
   // The number of regions available for "regular" expansion.
   size_t expansion_regions() { return _expansion_regions; }
 
+  // Factory method for HeapRegion instances. It will return NULL if
+  // the allocation fails.
+  HeapRegion* new_heap_region(size_t hrs_index, HeapWord* bottom);
+
   void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
   void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
   void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
@@ -1090,9 +1193,6 @@
 
   virtual bool is_in_closed_subset(const void* p) const;
 
-  // Dirty card table entries covering a list of young regions.
-  void dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list);
-
   // This resets the card table to all zeros.  It is used after
   // a collection pause which used the card table to claim cards.
   void cleanUpCardTable();
@@ -1131,17 +1231,15 @@
 
   // Iterate over heap regions, in address order, terminating the
   // iteration early if the "doHeapRegion" method returns "true".
-  void heap_region_iterate(HeapRegionClosure* blk);
+  void heap_region_iterate(HeapRegionClosure* blk) const;
 
   // Iterate over heap regions starting with r (or the first region if "r"
   // is NULL), in address order, terminating early if the "doHeapRegion"
   // method returns "true".
-  void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk);
+  void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk) const;
 
-  // As above but starting from the region at index idx.
-  void heap_region_iterate_from(int idx, HeapRegionClosure* blk);
-
-  HeapRegion* region_at(size_t idx);
+  // Return the region with the given index. It assumes the index is valid.
+  HeapRegion* region_at(size_t index) const { return _hrs.at(index); }
 
   // Divide the heap region sequence into "chunks" of some size (the number
   // of regions divided by the number of parallel threads times some
@@ -1182,12 +1280,14 @@
 
   // A G1CollectedHeap will contain some number of heap regions.  This
   // finds the region containing a given address, or else returns NULL.
-  HeapRegion* heap_region_containing(const void* addr) const;
+  template <class T>
+  inline HeapRegion* heap_region_containing(const T addr) const;
 
   // Like the above, but requires "addr" to be in the heap (to avoid a
   // null-check), and unlike the above, may return a continuing humongous
   // region.
-  HeapRegion* heap_region_containing_raw(const void* addr) const;
+  template <class T>
+  inline HeapRegion* heap_region_containing_raw(const T addr) const;
 
   // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
   // each address in the (reserved) heap is a member of exactly
@@ -1249,7 +1349,7 @@
     return true;
   }
 
-  bool is_in_young(oop obj) {
+  bool is_in_young(const oop obj) {
     HeapRegion* hr = heap_region_containing(obj);
     return hr != NULL && hr->is_young();
   }
@@ -1264,16 +1364,10 @@
   // in the young gen: for the SATB pre-barrier, there is no
   // pre-value that needs to be remembered; for the remembered-set
   // update logging post-barrier, we don't maintain remembered set
-  // information for young gen objects. Note that non-generational
-  // G1 does not have any "young" objects, should not elide
-  // the rs logging barrier and so should always answer false below.
-  // However, non-generational G1 (-XX:-G1Gen) appears to have
-  // bit-rotted so was not tested below.
+  // information for young gen objects.
   virtual bool can_elide_initializing_store_barrier(oop new_obj) {
     // Re 6920090, 6920109 above.
     assert(ReduceInitialCardMarksForG1, "Else cannot be here");
-    assert(G1Gen || !is_in_young(new_obj),
-           "Non-generational G1 should never return true below");
     return is_in_young(new_obj);
   }
 
@@ -1286,10 +1380,6 @@
     return true;
   }
 
-  // The boundary between a "large" and "small" array of primitives, in
-  // words.
-  virtual size_t large_typearray_limit();
-
   // Returns "true" iff the given word_size is "very large".
   static bool isHumongous(size_t word_size) {
     // Note this has to be strictly greater-than as the TLABs
@@ -1329,14 +1419,20 @@
 
   // Perform verification.
 
-  // use_prev_marking == true  -> use "prev" marking information,
-  // use_prev_marking == false -> use "next" marking information
+  // vo == UsePrevMarking -> use "prev" marking information,
+  // vo == UseNextMarking -> use "next" marking information,
+  // vo == UseMarkWord    -> use the mark word in the object header
+  //
   // NOTE: Only the "prev" marking information is guaranteed to be
   // consistent most of the time, so most calls to this should use
-  // use_prev_marking == true. Currently, there is only one case where
-  // this is called with use_prev_marking == false, which is to verify
-  // the "next" marking information at the end of remark.
-  void verify(bool allow_dirty, bool silent, bool use_prev_marking);
+  // vo == UsePrevMarking.
+  // Currently, there is only one case where this is called with
+  // vo == UseNextMarking, which is to verify the "next" marking
+  // information at the end of remark.
+  // Currently there is only one place where this is called with
+  // vo == UseMarkWord, which is to verify the marking during a
+  // full GC.
+  void verify(bool allow_dirty, bool silent, VerifyOption vo);
 
   // Override; it uses the "prev" marking information
   virtual void verify(bool allow_dirty, bool silent);
@@ -1355,10 +1451,9 @@
   // Override
   void print_tracing_info() const;
 
-  // If "addr" is a pointer into the (reserved?) heap, returns a positive
-  // number indicating the "arena" within the heap in which "addr" falls.
-  // Or else returns 0.
-  virtual int addr_to_arena_id(void* addr) const;
+  // The following two methods are helpful for debugging RSet issues.
+  void print_cset_rsets() PRODUCT_RETURN;
+  void print_all_rsets() PRODUCT_RETURN;
 
   // Convenience function to be used in situations where the heap type can be
   // asserted to be this type.
@@ -1389,24 +1484,24 @@
   // bitmap off to the side.
   void doConcurrentMark();
 
-  // This is called from the marksweep collector which then does
-  // a concurrent mark and verifies that the results agree with
-  // the stop the world marking.
-  void checkConcurrentMark();
-  void do_sync_mark();
-
   bool isMarkedPrev(oop obj) const;
   bool isMarkedNext(oop obj) const;
 
-  // use_prev_marking == true  -> use "prev" marking information,
-  // use_prev_marking == false -> use "next" marking information
+  // vo == UsePrevMarking -> use "prev" marking information,
+  // vo == UseNextMarking -> use "next" marking information,
+  // vo == UseMarkWord    -> use mark word from object header
   bool is_obj_dead_cond(const oop obj,
                         const HeapRegion* hr,
-                        const bool use_prev_marking) const {
-    if (use_prev_marking) {
-      return is_obj_dead(obj, hr);
-    } else {
-      return is_obj_ill(obj, hr);
+                        const VerifyOption vo) const {
+
+    switch (vo) {
+      case VerifyOption_G1UsePrevMarking:
+        return is_obj_dead(obj, hr);
+      case VerifyOption_G1UseNextMarking:
+        return is_obj_ill(obj, hr);
+      default:
+        assert(vo == VerifyOption_G1UseMarkWord, "must be");
+        return !obj->is_gc_marked();
     }
   }
 
@@ -1447,18 +1542,24 @@
   // Added if it is in permanent gen it isn't dead.
   // Added if it is NULL it isn't dead.
 
-  // use_prev_marking == true  -> use "prev" marking information,
-  // use_prev_marking == false -> use "next" marking information
+  // vo == UsePrevMarking -> use "prev" marking information,
+  // vo == UseNextMarking -> use "next" marking information,
+  // vo == UseMarkWord    -> use mark word from object header
   bool is_obj_dead_cond(const oop obj,
-                        const bool use_prev_marking) {
-    if (use_prev_marking) {
-      return is_obj_dead(obj);
-    } else {
-      return is_obj_ill(obj);
+                        const VerifyOption vo) const {
+
+    switch (vo) {
+      case VerifyOption_G1UsePrevMarking:
+        return is_obj_dead(obj);
+      case VerifyOption_G1UseNextMarking:
+        return is_obj_ill(obj);
+      default:
+        assert(vo == VerifyOption_G1UseMarkWord, "must be");
+        return !obj->is_gc_marked();
     }
   }
 
-  bool is_obj_dead(const oop obj) {
+  bool is_obj_dead(const oop obj) const {
     const HeapRegion* hr = heap_region_containing(obj);
     if (hr == NULL) {
       if (Universe::heap()->is_in_permanent(obj))
@@ -1469,7 +1570,7 @@
     else return is_obj_dead(obj, hr);
   }
 
-  bool is_obj_ill(const oop obj) {
+  bool is_obj_ill(const oop obj) const {
     const HeapRegion* hr = heap_region_containing(obj);
     if (hr == NULL) {
       if (Universe::heap()->is_in_permanent(obj))
@@ -1715,26 +1816,22 @@
 class G1ParGCAllocBuffer: public ParGCAllocBuffer {
 private:
   bool        _retired;
-  bool        _during_marking;
+  bool        _should_mark_objects;
   GCLabBitMap _bitmap;
 
 public:
-  G1ParGCAllocBuffer(size_t gclab_word_size) :
-    ParGCAllocBuffer(gclab_word_size),
-    _during_marking(G1CollectedHeap::heap()->mark_in_progress()),
-    _bitmap(G1CollectedHeap::heap()->reserved_region().start(), gclab_word_size),
-    _retired(false)
-  { }
+  G1ParGCAllocBuffer(size_t gclab_word_size);
 
   inline bool mark(HeapWord* addr) {
     guarantee(use_local_bitmaps, "invariant");
-    assert(_during_marking, "invariant");
+    assert(_should_mark_objects, "invariant");
     return _bitmap.mark(addr);
   }
 
   inline void set_buf(HeapWord* buf) {
-    if (use_local_bitmaps && _during_marking)
+    if (use_local_bitmaps && _should_mark_objects) {
       _bitmap.set_buffer(buf);
+    }
     ParGCAllocBuffer::set_buf(buf);
     _retired = false;
   }
@@ -1742,7 +1839,7 @@
   inline void retire(bool end_of_gc, bool retain) {
     if (_retired)
       return;
-    if (use_local_bitmaps && _during_marking) {
+    if (use_local_bitmaps && _should_mark_objects) {
       _bitmap.retire();
     }
     ParGCAllocBuffer::retire(end_of_gc, retain);
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -34,9 +34,10 @@
 
 // Inline functions for G1CollectedHeap
 
+template <class T>
 inline HeapRegion*
-G1CollectedHeap::heap_region_containing(const void* addr) const {
-  HeapRegion* hr = _hrs->addr_to_region(addr);
+G1CollectedHeap::heap_region_containing(const T addr) const {
+  HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
   // hr can be null if addr in perm_gen
   if (hr != NULL && hr->continuesHumongous()) {
     hr = hr->humongous_start_region();
@@ -44,19 +45,16 @@
   return hr;
 }
 
+template <class T>
 inline HeapRegion*
-G1CollectedHeap::heap_region_containing_raw(const void* addr) const {
-  assert(_g1_reserved.contains(addr), "invariant");
-  size_t index = pointer_delta(addr, _g1_reserved.start(), 1)
-                                        >> HeapRegion::LogOfHRGrainBytes;
-
-  HeapRegion* res = _hrs->at(index);
-  assert(res == _hrs->addr_to_region(addr), "sanity");
+G1CollectedHeap::heap_region_containing_raw(const T addr) const {
+  assert(_g1_reserved.contains((const void*) addr), "invariant");
+  HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr);
   return res;
 }
 
 inline bool G1CollectedHeap::obj_in_cs(oop obj) {
-  HeapRegion* r = _hrs->addr_to_region(obj);
+  HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
   return r != NULL && r->in_collection_set();
 }
 
@@ -79,6 +77,38 @@
   return result;
 }
 
+inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t
+                                                              word_size) {
+  assert(!isHumongous(word_size),
+         "we should not be seeing humongous-size allocations in this path");
+
+  HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size,
+                                                      false /* bot_updates */);
+  if (result == NULL) {
+    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
+    result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size,
+                                                      false /* bot_updates */);
+  }
+  if (result != NULL) {
+    dirty_young_block(result, word_size);
+  }
+  return result;
+}
+
+inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) {
+  assert(!isHumongous(word_size),
+         "we should not be seeing humongous-size allocations in this path");
+
+  HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size,
+                                                       true /* bot_updates */);
+  if (result == NULL) {
+    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
+    result = _old_gc_alloc_region.attempt_allocation_locked(word_size,
+                                                       true /* bot_updates */);
+  }
+  return result;
+}
+
 // It dirties the cards that cover the block so that the post
 // write barrier never queues anything when updating objects on this
 // block. It is assumed (and in fact we assert) that the block
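
survivor_attempt_allocation() and old_attempt_allocation() above share a try-without-the-lock, retry-under-the-lock shape. A self-contained sketch of that shape with toy types (a std::mutex stands in for FreeList_lock; the fast path is deliberately made to fail so the slow path is exercised):

  #include <cstddef>
  #include <mutex>
  #include <new>

  struct ToyGCAllocRegion {
    // Fast path: bump-allocate in the current region; pretend it is full.
    void* attempt_allocation(size_t /* word_size */) { return 0; }
    // Slow path: caller holds the lock and may install a fresh region.
    void* attempt_allocation_locked(size_t word_size) {
      return ::operator new(word_size * sizeof(void*));
    }
  };

  static std::mutex free_list_lock;

  static void* gc_attempt_allocation(ToyGCAllocRegion& region, size_t word_size) {
    void* result = region.attempt_allocation(word_size);
    if (result == 0) {
      std::lock_guard<std::mutex> x(free_list_lock);
      result = region.attempt_allocation_locked(word_size);
    }
    return result;
  }
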
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -28,6 +28,7 @@
 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/g1ErgoVerbose.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/shared/gcPolicyCounters.hpp"
 #include "runtime/arguments.hpp"
@@ -134,13 +135,10 @@
 
 G1CollectorPolicy::G1CollectorPolicy() :
   _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
-    ? ParallelGCThreads : 1),
-
+                        ? ParallelGCThreads : 1),
 
   _n_pauses(0),
-  _recent_CH_strong_roots_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
-  _recent_G1_strong_roots_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
-  _recent_evac_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
+  _recent_rs_scan_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
   _recent_pause_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
   _recent_rs_sizes(new TruncatedSeq(NumPrevPausesForHeuristics)),
   _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
@@ -148,13 +146,18 @@
   _stop_world_start(0.0),
   _all_stop_world_times_ms(new NumberSeq()),
   _all_yield_times_ms(new NumberSeq()),
+  _using_new_ratio_calculations(false),
 
   _all_mod_union_times_ms(new NumberSeq()),
 
   _summary(new Summary()),
 
+  _cur_clear_ct_time_ms(0.0),
+
+  _cur_ref_proc_time_ms(0.0),
+  _cur_ref_enq_time_ms(0.0),
+
 #ifndef PRODUCT
-  _cur_clear_ct_time_ms(0.0),
   _min_clear_cc_time_ms(-1.0),
   _max_clear_cc_time_ms(-1.0),
   _cur_clear_cc_time_ms(0.0),
@@ -173,7 +176,6 @@
   _cur_aux_times_ms(new double[_aux_num]),
   _cur_aux_times_set(new bool[_aux_num]),
 
-  _concurrent_mark_init_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
   _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
   _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
 
@@ -204,7 +206,6 @@
 
   // </NEW PREDICTION>
 
-  _in_young_gc_mode(false),
   _full_young_gcs(true),
   _full_young_pause_num(0),
   _partial_young_pause_num(0),
@@ -239,6 +240,10 @@
   _should_revert_to_full_young_gcs(false),
   _last_full_young_gc(false),
 
+  _eden_bytes_before_gc(0),
+  _survivor_bytes_before_gc(0),
+  _capacity_before_gc(0),
+
   _prev_collection_pause_used_at_end_bytes(0),
 
   _collection_set(NULL),
@@ -272,15 +277,26 @@
   _recorded_survivor_tail(NULL),
   _survivors_age_table(true),
 
-  _gc_overhead_perc(0.0)
-
-{
+  _gc_overhead_perc(0.0) {
+
   // Set up the region size and associated fields. Given that the
   // policy is created before the heap, we have to set this up here,
   // so it's done as soon as possible.
   HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
   HeapRegionRemSet::setup_remset_size();
 
+  G1ErgoVerbose::initialize();
+  if (PrintAdaptiveSizePolicy) {
+    // Currently, we only use a single switch for all the heuristics.
+    G1ErgoVerbose::set_enabled(true);
+    // Given that we don't currently have a verboseness level
+    // parameter, we'll hardcode this to high. This can be easily
+    // changed in the future.
+    G1ErgoVerbose::set_level(ErgoHigh);
+  } else {
+    G1ErgoVerbose::set_enabled(false);
+  }
+
   // Verify PLAB sizes
   const uint region_size = HeapRegion::GrainWords;
   if (YoungPLABSize > region_size || OldPLABSize > region_size) {
@@ -399,21 +415,30 @@
   _sigma = (double) G1ConfidencePercent / 100.0;
 
   // start conservatively (around 50ms is about right)
-  _concurrent_mark_init_times_ms->add(0.05);
   _concurrent_mark_remark_times_ms->add(0.05);
   _concurrent_mark_cleanup_times_ms->add(0.20);
   _tenuring_threshold = MaxTenuringThreshold;
-
-  // if G1FixedSurvivorSpaceSize is 0 which means the size is not
-  // fixed, then _max_survivor_regions will be calculated at
-  // calculate_young_list_target_length during initialization
-  _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
+  // _max_survivor_regions will be calculated by
+  // update_young_list_target_length() during initialization.
+  _max_survivor_regions = 0;
 
   assert(GCTimeRatio > 0,
          "we should have set it to a default value set_g1_gc_flags() "
          "if a user set it to 0");
   _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
 
+  uintx reserve_perc = G1ReservePercent;
+  // Put an artificial ceiling on this so that it's not set to a silly value.
+  if (reserve_perc > 50) {
+    reserve_perc = 50;
+    warning("G1ReservePercent is set to a value that is too large, "
+            "it's been updated to %u", reserve_perc);
+  }
+  _reserve_factor = (double) reserve_perc / 100.0;
+  // This will be set when the heap is expanded
+  // for the first time during initialization.
+  _reserve_regions = 0;
+
   initialize_all();
 }
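
To make the reserve arithmetic above concrete: -XX:G1ReservePercent=10 gives _reserve_factor = 10 / 100.0 = 0.10, while a (mis)setting of 80 is clamped to 50, with a warning, so the reserve can never exceed half the heap.
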
 
@@ -438,6 +463,7 @@
 // ParallelScavengeHeap::initialize()). We might change this in the
 // future, but it's a good start.
 class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
+private:
   size_t size_to_region_num(size_t byte_size) {
     return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
   }
@@ -447,7 +473,6 @@
     initialize_flags();
     initialize_size_info();
   }
-
   size_t min_young_region_num() {
     return size_to_region_num(_min_gen0_size);
   }
@@ -459,6 +484,13 @@
   }
 };
 
+void G1CollectorPolicy::update_young_list_size_using_newratio(size_t number_of_heap_regions) {
+  assert(number_of_heap_regions > 0, "Heap must be initialized");
+  size_t young_size = number_of_heap_regions / (NewRatio + 1);
+  _min_desired_young_length = young_size;
+  _max_desired_young_length = young_size;
+}
+
 void G1CollectorPolicy::init() {
   // Set aside an initial future to_space.
   _g1 = G1CollectedHeap::heap();
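
As a worked example of the NewRatio path above: with 300 heap regions and -XX:NewRatio=2, update_young_list_size_using_newratio() sets both the minimum and the maximum desired young length to 300 / (2 + 1) = 100 regions; since the two bounds are then equal, init() leaves the young list length fixed rather than adaptive.
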
@@ -467,27 +499,36 @@
 
   initialize_gc_policy_counters();
 
-  if (G1Gen) {
-    _in_young_gc_mode = true;
-
-    G1YoungGenSizer sizer;
-    size_t initial_region_num = sizer.initial_young_region_num();
-
-    if (UseAdaptiveSizePolicy) {
-      set_adaptive_young_list_length(true);
-      _young_list_fixed_length = 0;
+  G1YoungGenSizer sizer;
+  size_t initial_region_num = sizer.initial_young_region_num();
+  _min_desired_young_length = sizer.min_young_region_num();
+  _max_desired_young_length = sizer.max_young_region_num();
+
+  if (FLAG_IS_CMDLINE(NewRatio)) {
+    if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
+      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
     } else {
-      set_adaptive_young_list_length(false);
-      _young_list_fixed_length = initial_region_num;
+      // Treat NewRatio as a fixed size that is only recalculated when the heap size changes
+      update_young_list_size_using_newratio(_g1->n_regions());
+      _using_new_ratio_calculations = true;
     }
-    _free_regions_at_end_of_collection = _g1->free_regions();
-    calculate_young_list_min_length();
-    guarantee( _young_list_min_length == 0, "invariant, not enough info" );
-    calculate_young_list_target_length();
+  }
+
+  // GenCollectorPolicy guarantees that min <= initial <= max.
+  // Asserting here just to state that we rely on this property.
+  assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");
+  assert(initial_region_num <= _max_desired_young_length, "Initial young gen size too large");
+  assert(_min_desired_young_length <= initial_region_num, "Initial young gen size too small");
+
+  set_adaptive_young_list_length(_min_desired_young_length < _max_desired_young_length);
+  if (adaptive_young_list_length()) {
+    _young_list_fixed_length = 0;
   } else {
-     _young_list_fixed_length = 0;
-    _in_young_gc_mode = false;
+    _young_list_fixed_length = initial_region_num;
   }
+  _free_regions_at_end_of_collection = _g1->free_regions();
+  update_young_list_target_length();
+  _prev_eden_capacity = _young_list_target_length * HeapRegion::GrainBytes;
 
   // We may immediately start allocating regions and placing them on the
   // collection set list. Initialize the per-collection set info
@@ -495,238 +536,261 @@
 }
 
 // Create the jstat counters for the policy.
-void G1CollectorPolicy::initialize_gc_policy_counters()
-{
-  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 2 + G1Gen);
-}
-
-void G1CollectorPolicy::calculate_young_list_min_length() {
-  _young_list_min_length = 0;
-
-  if (!adaptive_young_list_length())
-    return;
-
-  if (_alloc_rate_ms_seq->num() > 3) {
-    double now_sec = os::elapsedTime();
-    double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
-    double alloc_rate_ms = predict_alloc_rate_ms();
-    size_t min_regions = (size_t) ceil(alloc_rate_ms * when_ms);
-    size_t current_region_num = _g1->young_list()->length();
-    _young_list_min_length = min_regions + current_region_num;
-  }
-}
-
-void G1CollectorPolicy::calculate_young_list_target_length() {
-  if (adaptive_young_list_length()) {
-    size_t rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
-    calculate_young_list_target_length(rs_lengths);
-  } else {
-    if (full_young_gcs())
-      _young_list_target_length = _young_list_fixed_length;
-    else
-      _young_list_target_length = _young_list_fixed_length / 2;
-  }
-
-  // Make sure we allow the application to allocate at least one
-  // region before we need to do a collection again.
-  size_t min_length = _g1->young_list()->length() + 1;
-  _young_list_target_length = MAX2(_young_list_target_length, min_length);
-  calculate_max_gc_locker_expansion();
-  calculate_survivors_policy();
+void G1CollectorPolicy::initialize_gc_policy_counters() {
+  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
 }
 
-void G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths) {
-  guarantee( adaptive_young_list_length(), "pre-condition" );
-  guarantee( !_in_marking_window || !_last_full_young_gc, "invariant" );
-
-  double start_time_sec = os::elapsedTime();
-  size_t min_reserve_perc = MAX2((size_t)2, (size_t)G1ReservePercent);
-  min_reserve_perc = MIN2((size_t) 50, min_reserve_perc);
-  size_t reserve_regions =
-    (size_t) ((double) min_reserve_perc * (double) _g1->n_regions() / 100.0);
-
-  if (full_young_gcs() && _free_regions_at_end_of_collection > 0) {
-    // we are in fully-young mode and there are free regions in the heap
-
-    double survivor_regions_evac_time =
-        predict_survivor_regions_evac_time();
-
-    double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
-    size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
-    size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
-    size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
-    double base_time_ms = predict_base_elapsed_time_ms(pending_cards, scanned_cards)
-                          + survivor_regions_evac_time;
-
-    // the result
-    size_t final_young_length = 0;
-
-    size_t init_free_regions =
-      MAX2((size_t)0, _free_regions_at_end_of_collection - reserve_regions);
-
-    // if we're still under the pause target...
-    if (base_time_ms <= target_pause_time_ms) {
-      // We make sure that the shortest young length that makes sense
-      // fits within the target pause time.
-      size_t min_young_length = 1;
-
-      if (predict_will_fit(min_young_length, base_time_ms,
-                                     init_free_regions, target_pause_time_ms)) {
-        // The shortest young length will fit within the target pause time;
-        // we'll now check whether the absolute maximum number of young
-        // regions will fit in the target pause time. If not, we'll do
-        // a binary search between min_young_length and max_young_length
-        size_t abs_max_young_length = _free_regions_at_end_of_collection - 1;
-        size_t max_young_length = abs_max_young_length;
-
-        if (max_young_length > min_young_length) {
-          // Let's check if the initial max young length will fit within the
-          // target pause. If so then there is no need to search for a maximal
-          // young length - we'll return the initial maximum
-
-          if (predict_will_fit(max_young_length, base_time_ms,
-                                init_free_regions, target_pause_time_ms)) {
-            // The maximum young length will satisfy the target pause time.
-            // We are done so set min young length to this maximum length.
-            // The code after the loop will then set final_young_length using
-            // the value cached in the minimum length.
-            min_young_length = max_young_length;
-          } else {
-            // The maximum possible number of young regions will not fit within
-            // the target pause time so let's search....
-
-            size_t diff = (max_young_length - min_young_length) / 2;
-            max_young_length = min_young_length + diff;
-
-            while (max_young_length > min_young_length) {
-              if (predict_will_fit(max_young_length, base_time_ms,
-                                        init_free_regions, target_pause_time_ms)) {
-
-                // The current max young length will fit within the target
-                // pause time. Note we do not exit the loop here. By setting
-                // min = max, and then increasing the max below means that
-                // we will continue searching for an upper bound in the
-                // range [max..max+diff]
-                min_young_length = max_young_length;
-              }
-              diff = (max_young_length - min_young_length) / 2;
-              max_young_length = min_young_length + diff;
-            }
-            // the above loop found a maximal young length that will fit
-            // within the target pause time.
-          }
-          assert(min_young_length <= abs_max_young_length, "just checking");
-        }
-        final_young_length = min_young_length;
-      }
-    }
-    // and we're done!
-
-    // we should have at least one region in the target young length
-    _young_list_target_length =
-                              final_young_length + _recorded_survivor_regions;
-
-    // let's keep an eye of how long we spend on this calculation
-    // right now, I assume that we'll print it when we need it; we
-    // should really adde it to the breakdown of a pause
-    double end_time_sec = os::elapsedTime();
-    double elapsed_time_ms = (end_time_sec - start_time_sec) * 1000.0;
-
-#ifdef TRACE_CALC_YOUNG_LENGTH
-    // leave this in for debugging, just in case
-    gclog_or_tty->print_cr("target = %1.1lf ms, young = " SIZE_FORMAT ", "
-                           "elapsed %1.2lf ms, (%s%s) " SIZE_FORMAT SIZE_FORMAT,
-                           target_pause_time_ms,
-                           _young_list_target_length
-                           elapsed_time_ms,
-                           full_young_gcs() ? "full" : "partial",
-                           during_initial_mark_pause() ? " i-m" : "",
-                           _in_marking_window,
-                           _in_marking_window_im);
-#endif // TRACE_CALC_YOUNG_LENGTH
-
-    if (_young_list_target_length < _young_list_min_length) {
-      // bummer; this means that, if we do a pause when the maximal
-      // length dictates, we'll violate the pause spacing target (the
-      // min length was calculate based on the application's current
-      // alloc rate);
-
-      // so, we have to bite the bullet, and allocate the minimum
-      // number. We'll violate our target, but we just can't meet it.
-
-#ifdef TRACE_CALC_YOUNG_LENGTH
-      // leave this in for debugging, just in case
-      gclog_or_tty->print_cr("adjusted target length from "
-                             SIZE_FORMAT " to " SIZE_FORMAT,
-                             _young_list_target_length, _young_list_min_length);
-#endif // TRACE_CALC_YOUNG_LENGTH
-
-      _young_list_target_length = _young_list_min_length;
-    }
-  } else {
-    // we are in a partially-young mode or we've run out of regions (due
-    // to evacuation failure)
-
-#ifdef TRACE_CALC_YOUNG_LENGTH
-    // leave this in for debugging, just in case
-    gclog_or_tty->print_cr("(partial) setting target to " SIZE_FORMAT
-                           _young_list_min_length);
-#endif // TRACE_CALC_YOUNG_LENGTH
-    // we'll do the pause as soon as possible by choosing the minimum
-    _young_list_target_length = _young_list_min_length;
-  }
-
-  _rs_lengths_prediction = rs_lengths;
-}
-
-// This is used by: calculate_young_list_target_length(rs_length). It
-// returns true iff:
-//   the predicted pause time for the given young list will not overflow
-//   the target pause time
-// and:
-//   the predicted amount of surviving data will not overflow the
-//   the amount of free space available for survivor regions.
-//
-bool
-G1CollectorPolicy::predict_will_fit(size_t young_length,
-                                    double base_time_ms,
-                                    size_t init_free_regions,
-                                    double target_pause_time_ms) {
-
-  if (young_length >= init_free_regions)
+bool G1CollectorPolicy::predict_will_fit(size_t young_length,
+                                         double base_time_ms,
+                                         size_t base_free_regions,
+                                         double target_pause_time_ms) {
+  if (young_length >= base_free_regions) {
     // end condition 1: not enough space for the young regions
     return false;
-
-  double accum_surv_rate_adj = 0.0;
-  double accum_surv_rate =
-    accum_yg_surv_rate_pred((int)(young_length - 1)) - accum_surv_rate_adj;
-
+  }
+
+  double accum_surv_rate = accum_yg_surv_rate_pred((int)(young_length - 1));
   size_t bytes_to_copy =
-    (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
-
+               (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
   double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
-
-  double young_other_time_ms =
-                       predict_young_other_time_ms(young_length);
-
-  double pause_time_ms =
-                   base_time_ms + copy_time_ms + young_other_time_ms;
-
-  if (pause_time_ms > target_pause_time_ms)
-    // end condition 2: over the target pause time
+  double young_other_time_ms = predict_young_other_time_ms(young_length);
+  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
+  if (pause_time_ms > target_pause_time_ms) {
+    // end condition 2: prediction is over the target pause time
     return false;
+  }
 
   size_t free_bytes =
-                 (init_free_regions - young_length) * HeapRegion::GrainBytes;
-
-  if ((2.0 + sigma()) * (double) bytes_to_copy > (double) free_bytes)
-    // end condition 3: out of to-space (conservatively)
+                  (base_free_regions - young_length) * HeapRegion::GrainBytes;
+  if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
+    // end condition 3: out-of-space (conservatively!)
     return false;
+  }
 
   // success!
   return true;
 }
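
// [Editor's note] Minimal standalone sketch of the three "end conditions"
// predict_will_fit() checks above; illustrative only, not HotSpot code. The
// prediction inputs (copy_time_ms, young_other_time_ms, bytes_to_copy, sigma)
// are plain parameters here because the real predictor calls live in the
// policy class.
#include <cstddef>

static bool sketch_will_fit(size_t young_length, double base_time_ms,
                            size_t base_free_regions,
                            double target_pause_time_ms,
                            double copy_time_ms, double young_other_time_ms,
                            size_t bytes_to_copy, double sigma,
                            size_t region_bytes) {
  if (young_length >= base_free_regions) {
    return false;                               // 1: not enough free regions
  }
  double pause_ms = base_time_ms + copy_time_ms + young_other_time_ms;
  if (pause_ms > target_pause_time_ms) {
    return false;                               // 2: prediction over the target
  }
  size_t free_bytes = (base_free_regions - young_length) * region_bytes;
  if ((2.0 * sigma) * (double) bytes_to_copy > (double) free_bytes) {
    return false;                               // 3: out of to-space (conservatively)
  }
  return true;                                  // the length fits
}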
 
+void G1CollectorPolicy::record_new_heap_size(size_t new_number_of_regions) {
+  // re-calculate the necessary reserve
+  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
+  // We use ceiling so that if reserve_regions_d is > 0.0 (but
+  // smaller than 1.0) we'll get 1.
+  _reserve_regions = (size_t) ceil(reserve_regions_d);
+
+  if (_using_new_ratio_calculations) {
+    // -XX:NewRatio was specified so we need to update the
+    // young gen length when the heap size has changed.
+    update_young_list_size_using_newratio(new_number_of_regions);
+  }
+}
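
// [Editor's note] Illustrative arithmetic only (the numbers are not from the
// source): with _reserve_factor == 0.10, 25 regions give reserve_regions_d ==
// 2.5 and ceil() reserves 3 regions; 5 regions give 0.5 and the ceiling still
// reserves 1 region, which is the point of using ceil() rather than truncation.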
+
+size_t G1CollectorPolicy::calculate_young_list_desired_min_length(
+                                                     size_t base_min_length) {
+  size_t desired_min_length = 0;
+  if (adaptive_young_list_length()) {
+    if (_alloc_rate_ms_seq->num() > 3) {
+      double now_sec = os::elapsedTime();
+      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
+      double alloc_rate_ms = predict_alloc_rate_ms();
+      desired_min_length = (size_t) ceil(alloc_rate_ms * when_ms);
+    } else {
+      // otherwise we don't have enough info to make the prediction
+    }
+  }
+  desired_min_length += base_min_length;
+  // make sure we don't go below any user-defined minimum bound
+  return MAX2(_min_desired_young_length, desired_min_length);
+}
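
// [Editor's note] Worked example with made-up numbers: if the predicted
// allocation rate is 0.2 regions/ms and the MMU tracker allows the next GC to
// start in 50 ms, the prediction adds ceil(0.2 * 50) == 10 regions on top of
// base_min_length (the current survivors), subject to the user-defined
// minimum applied by the MAX2() above.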
+
+size_t G1CollectorPolicy::calculate_young_list_desired_max_length() {
+  // Here, we might want to also take into account any additional
+  // constraints (i.e., user-defined minimum bound). Currently, we
+  // effectively don't set this bound.
+  return _max_desired_young_length;
+}
+
+void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
+  if (rs_lengths == (size_t) -1) {
+    // if it's set to the default value (-1), we should predict it;
+    // otherwise, use the given value.
+    rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
+  }
+
+  // Calculate the absolute and desired min bounds.
+
+  // This is how many young regions we already have (currently: the survivors).
+  size_t base_min_length = recorded_survivor_regions();
+  // This is the absolute minimum young length, which ensures that we
+  // can allocate one eden region in the worst-case.
+  size_t absolute_min_length = base_min_length + 1;
+  size_t desired_min_length =
+                     calculate_young_list_desired_min_length(base_min_length);
+  if (desired_min_length < absolute_min_length) {
+    desired_min_length = absolute_min_length;
+  }
+
+  // Calculate the absolute and desired max bounds.
+
+  // We will try our best not to "eat" into the reserve.
+  size_t absolute_max_length = 0;
+  if (_free_regions_at_end_of_collection > _reserve_regions) {
+    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
+  }
+  size_t desired_max_length = calculate_young_list_desired_max_length();
+  if (desired_max_length > absolute_max_length) {
+    desired_max_length = absolute_max_length;
+  }
+
+  size_t young_list_target_length = 0;
+  if (adaptive_young_list_length()) {
+    if (full_young_gcs()) {
+      young_list_target_length =
+                        calculate_young_list_target_length(rs_lengths,
+                                                           base_min_length,
+                                                           desired_min_length,
+                                                           desired_max_length);
+      _rs_lengths_prediction = rs_lengths;
+    } else {
+      // Don't calculate anything and let the code below bound it to
+      // the desired_min_length, i.e., do the next GC as soon as
+      // possible to maximize how many old regions we can add to it.
+    }
+  } else {
+    if (full_young_gcs()) {
+      young_list_target_length = _young_list_fixed_length;
+    } else {
+      // A bit arbitrary: during partially-young GCs we allocate half
+      // the young regions to try to add old regions to the CSet.
+      young_list_target_length = _young_list_fixed_length / 2;
+      // We choose to accept that we might go under the desired min
+      // length given that we intentionally ask for a smaller young gen.
+      desired_min_length = absolute_min_length;
+    }
+  }
+
+  // Make sure we don't go over the desired max length, nor under the
+  // desired min length. In case they clash, desired_min_length wins
+  // which is why that test is second.
+  if (young_list_target_length > desired_max_length) {
+    young_list_target_length = desired_max_length;
+  }
+  if (young_list_target_length < desired_min_length) {
+    young_list_target_length = desired_min_length;
+  }
+
+  assert(young_list_target_length > recorded_survivor_regions(),
+         "we should be able to allocate at least one eden region");
+  assert(young_list_target_length >= absolute_min_length, "post-condition");
+  _young_list_target_length = young_list_target_length;
+
+  update_max_gc_locker_expansion();
+}
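
// [Editor's note] Worked example of the clamping order above (made-up
// numbers): with desired_min_length == 12, desired_max_length == 10 and a
// calculated target of 11, the max test first lowers the target to 10 and the
// min test then raises it to 12, so the minimum wins when the bounds conflict.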
+
+size_t
+G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
+                                                   size_t base_min_length,
+                                                   size_t desired_min_length,
+                                                   size_t desired_max_length) {
+  assert(adaptive_young_list_length(), "pre-condition");
+  assert(full_young_gcs(), "only call this for fully-young GCs");
+
+  // In case some edge-condition makes the desired max length too small...
+  if (desired_max_length <= desired_min_length) {
+    return desired_min_length;
+  }
+
+  // We'll adjust min_young_length and max_young_length not to include
+  // the already allocated young regions (i.e., so they reflect the
+  // min and max eden regions we'll allocate). The base_min_length
+  // will be reflected in the predictions by the
+  // survivor_regions_evac_time prediction.
+  assert(desired_min_length > base_min_length, "invariant");
+  size_t min_young_length = desired_min_length - base_min_length;
+  assert(desired_max_length > base_min_length, "invariant");
+  size_t max_young_length = desired_max_length - base_min_length;
+
+  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
+  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
+  size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
+  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
+  size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
+  double base_time_ms =
+    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
+    survivor_regions_evac_time;
+  size_t available_free_regions = _free_regions_at_end_of_collection;
+  size_t base_free_regions = 0;
+  if (available_free_regions > _reserve_regions) {
+    base_free_regions = available_free_regions - _reserve_regions;
+  }
+
+  // Here, we will make sure that the shortest young length that
+  // makes sense fits within the target pause time.
+
+  if (predict_will_fit(min_young_length, base_time_ms,
+                       base_free_regions, target_pause_time_ms)) {
+    // The shortest young length will fit into the target pause time;
+    // we'll now check whether the absolute maximum number of young
+    // regions will fit in the target pause time. If not, we'll do
+    // a binary search between min_young_length and max_young_length.
+    if (predict_will_fit(max_young_length, base_time_ms,
+                         base_free_regions, target_pause_time_ms)) {
+      // The maximum young length will fit into the target pause time.
+      // We are done so set min young length to the maximum length (as
+      // the result is assumed to be returned in min_young_length).
+      min_young_length = max_young_length;
+    } else {
+      // The maximum possible number of young regions will not fit within
+      // the target pause time so we'll search for the optimal
+      // length. The loop invariants are:
+      //
+      // min_young_length < max_young_length
+      // min_young_length is known to fit into the target pause time
+      // max_young_length is known not to fit into the target pause time
+      //
+      // Going into the loop we know the above hold as we've just
+      // checked them. Every time around the loop we check whether
+      // the middle value between min_young_length and
+      // max_young_length fits into the target pause time. If it
+      // does, it becomes the new min. If it doesn't, it becomes
+      // the new max. This way we maintain the loop invariants.
+
+      assert(min_young_length < max_young_length, "invariant");
+      size_t diff = (max_young_length - min_young_length) / 2;
+      while (diff > 0) {
+        size_t young_length = min_young_length + diff;
+        if (predict_will_fit(young_length, base_time_ms,
+                             base_free_regions, target_pause_time_ms)) {
+          min_young_length = young_length;
+        } else {
+          max_young_length = young_length;
+        }
+        assert(min_young_length < max_young_length, "invariant");
+        diff = (max_young_length - min_young_length) / 2;
+      }
+      // The result is min_young_length which, according to the
+      // loop invariants, should fit within the target pause time.
+
+      // These are the post-conditions of the binary search above:
+      assert(min_young_length < max_young_length,
+             "otherwise we should have discovered that max_young_length "
+             "fits into the pause target and not done the binary search");
+      assert(predict_will_fit(min_young_length, base_time_ms,
+                              base_free_regions, target_pause_time_ms),
+             "min_young_length, the result of the binary search, should "
+             "fit into the pause target");
+      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
+                               base_free_regions, target_pause_time_ms),
+             "min_young_length, the result of the binary search, should be "
+             "optimal, so no larger length should fit into the pause target");
+    }
+  } else {
+    // Even the minimum length doesn't fit into the pause time
+    // target; return it as the result nevertheless.
+  }
+  return base_min_length + min_young_length;
+}
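
// [Editor's note] Standalone sketch of the binary search above; illustrative
// only, not HotSpot code. 'fits' stands in for predict_will_fit() with all
// other inputs fixed. Pre-condition: fits(min_len) && !fits(max_len) &&
// min_len < max_len. Returns the largest length that still fits.
#include <cstddef>
#include <functional>

static size_t sketch_max_fitting_length(size_t min_len, size_t max_len,
                                        const std::function<bool(size_t)>& fits) {
  size_t diff = (max_len - min_len) / 2;
  while (diff > 0) {
    size_t mid = min_len + diff;
    if (fits(mid)) {
      min_len = mid;            // mid fits: it becomes the new lower bound
    } else {
      max_len = mid;            // mid does not fit: it becomes the new upper bound
    }
    diff = (max_len - min_len) / 2;
  }
  return min_len;               // largest length known to fit
}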
+
 double G1CollectorPolicy::predict_survivor_regions_evac_time() {
   double survivor_regions_evac_time = 0.0;
   for (HeapRegion * r = _recorded_survivor_head;
@@ -737,17 +801,19 @@
   return survivor_regions_evac_time;
 }
 
-void G1CollectorPolicy::check_prediction_validity() {
+void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
   guarantee( adaptive_young_list_length(), "should not call this otherwise" );
 
   size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
   if (rs_lengths > _rs_lengths_prediction) {
     // add 10% to avoid having to recalculate often
     size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
-    calculate_young_list_target_length(rs_lengths_prediction);
+    update_young_list_target_length(rs_lengths_prediction);
   }
 }
 
+
+
 HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                                bool is_tlab,
                                                bool* gc_overhead_limit_was_exceeded) {
@@ -854,16 +920,7 @@
   _free_regions_at_end_of_collection = _g1->free_regions();
   // Reset survivors SurvRateGroup.
   _survivor_surv_rate_group->reset();
-  calculate_young_list_min_length();
-  calculate_young_list_target_length();
-}
-
-void G1CollectorPolicy::record_before_bytes(size_t bytes) {
-  _bytes_in_to_space_before_gc += bytes;
-}
-
-void G1CollectorPolicy::record_after_bytes(size_t bytes) {
-  _bytes_in_to_space_after_gc += bytes;
+  update_young_list_target_length();
 }
 
 void G1CollectorPolicy::record_stop_world_start() {
@@ -875,10 +932,14 @@
   if (PrintGCDetails) {
     gclog_or_tty->stamp(PrintGCTimeStamps);
     gclog_or_tty->print("[GC pause");
-    if (in_young_gc_mode())
-      gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
+    gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
   }
 
+  // We only need to do this here as the policy will only be applied
+  // to the GC we're about to start, so there is no point in calculating
+  // this every time we calculate / recalculate the target young length.
+  update_survivors_policy();
+
   assert(_g1->used() == _g1->recalculate_used(),
          err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                  _g1->used(), _g1->recalculate_used()));
@@ -893,9 +954,13 @@
   _pending_cards = _g1->pending_card_num();
   _max_pending_cards = _g1->max_pending_card_num();
 
-  _bytes_in_to_space_before_gc = 0;
-  _bytes_in_to_space_after_gc = 0;
   _bytes_in_collection_set_before_gc = 0;
+  _bytes_copied_during_gc = 0;
+
+  YoungList* young_list = _g1->young_list();
+  _eden_bytes_before_gc = young_list->eden_used_bytes();
+  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
+  _capacity_before_gc = _g1->capacity();
 
 #ifdef DEBUG
   // initialise these to something well known so that we can spot
@@ -924,8 +989,7 @@
   _satb_drain_time_set = false;
   _last_satb_drain_processed_buffers = -1;
 
-  if (in_young_gc_mode())
-    _last_young_gc_full = false;
+  _last_young_gc_full = false;
 
   // do that for any other surv rate groups
   _short_lived_surv_rate_group->stop_adding_regions();
@@ -938,12 +1002,7 @@
   _mark_closure_time_ms = mark_closure_time_ms;
 }
 
-void G1CollectorPolicy::record_concurrent_mark_init_start() {
-  _mark_init_start_sec = os::elapsedTime();
-  guarantee(!in_young_gc_mode(), "should not do be here in young GC mode");
-}
-
-void G1CollectorPolicy::record_concurrent_mark_init_end_pre(double
+void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                                    mark_init_elapsed_time_ms) {
   _during_marking = true;
   assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
@@ -951,15 +1010,6 @@
   _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
 }
 
-void G1CollectorPolicy::record_concurrent_mark_init_end() {
-  double end_time_sec = os::elapsedTime();
-  double elapsed_time_ms = (end_time_sec - _mark_init_start_sec) * 1000.0;
-  _concurrent_mark_init_times_ms->add(elapsed_time_ms);
-  record_concurrent_mark_init_end_pre(elapsed_time_ms);
-
-  _mmu_tracker->add_pause(_mark_init_start_sec, end_time_sec, true);
-}
-
 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
   _mark_remark_start_sec = os::elapsedTime();
   _during_marking = false;
@@ -990,11 +1040,9 @@
 G1CollectorPolicy::
 record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
                                          size_t max_live_bytes) {
-  if (_n_marks < 2) _n_marks++;
-  if (G1PolicyVerbose > 0)
-    gclog_or_tty->print_cr("At end of marking, max_live is " SIZE_FORMAT " MB "
-                           " (of " SIZE_FORMAT " MB heap).",
-                           max_live_bytes/M, _g1->capacity()/M);
+  if (_n_marks < 2) {
+    _n_marks++;
+  }
 }
 
 // The important thing about this is that it includes "os::elapsedTime".
@@ -1008,27 +1056,15 @@
   _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_time_sec, true);
 
   _num_markings++;
-
-  // We did a marking, so reset the "since_last_mark" variables.
-  double considerConcMarkCost = 1.0;
-  // If there are available processors, concurrent activity is free...
-  if (Threads::number_of_non_daemon_threads() * 2 <
-      os::active_processor_count()) {
-    considerConcMarkCost = 0.0;
-  }
   _n_pauses_at_mark_end = _n_pauses;
   _n_marks_since_last_pause++;
 }
 
 void
 G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
-  if (in_young_gc_mode()) {
-    _should_revert_to_full_young_gcs = false;
-    _last_full_young_gc = true;
-    _in_marking_window = false;
-    if (adaptive_young_list_length())
-      calculate_young_list_target_length();
-  }
+  _should_revert_to_full_young_gcs = false;
+  _last_full_young_gc = true;
+  _in_marking_window = false;
 }
 
 void G1CollectorPolicy::record_concurrent_pause() {
@@ -1041,18 +1077,6 @@
 void G1CollectorPolicy::record_concurrent_pause_end() {
 }
 
-void G1CollectorPolicy::record_collection_pause_end_CH_strong_roots() {
-  _cur_CH_strong_roots_end_sec = os::elapsedTime();
-  _cur_CH_strong_roots_dur_ms =
-    (_cur_CH_strong_roots_end_sec - _cur_collection_start_sec) * 1000.0;
-}
-
-void G1CollectorPolicy::record_collection_pause_end_G1_strong_roots() {
-  _cur_G1_strong_roots_end_sec = os::elapsedTime();
-  _cur_G1_strong_roots_dur_ms =
-    (_cur_G1_strong_roots_end_sec - _cur_CH_strong_roots_end_sec) * 1000.0;
-}
-
 template<class T>
 T sum_of(T* sum_arr, int start, int n, int N) {
   T sum = (T)0;
@@ -1174,7 +1198,6 @@
   double end_time_sec = os::elapsedTime();
   double elapsed_ms = _last_pause_time_ms;
   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
-  double evac_ms = (end_time_sec - _cur_G1_strong_roots_end_sec) * 1000.0;
   size_t rs_size =
     _cur_collection_pause_used_regions_at_start - collection_set_size();
   size_t cur_used_bytes = _g1->used();
@@ -1190,31 +1213,46 @@
   }
 #endif // PRODUCT
 
-  if (in_young_gc_mode()) {
-    last_pause_included_initial_mark = during_initial_mark_pause();
-    if (last_pause_included_initial_mark)
-      record_concurrent_mark_init_end_pre(0.0);
-
-    size_t min_used_targ =
-      (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
-
-
-    if (!_g1->mark_in_progress() && !_last_full_young_gc) {
-      assert(!last_pause_included_initial_mark, "invariant");
-      if (cur_used_bytes > min_used_targ &&
-          cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
+  last_pause_included_initial_mark = during_initial_mark_pause();
+  if (last_pause_included_initial_mark)
+    record_concurrent_mark_init_end(0.0);
+
+  size_t marking_initiating_used_threshold =
+    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
+
+  if (!_g1->mark_in_progress() && !_last_full_young_gc) {
+    assert(!last_pause_included_initial_mark, "invariant");
+    if (cur_used_bytes > marking_initiating_used_threshold) {
+      if (cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
         assert(!during_initial_mark_pause(), "we should not see this here");
 
+        ergo_verbose3(ErgoConcCycles,
+                      "request concurrent cycle initiation",
+                      ergo_format_reason("occupancy higher than threshold")
+                      ergo_format_byte("occupancy")
+                      ergo_format_byte_perc("threshold"),
+                      cur_used_bytes,
+                      marking_initiating_used_threshold,
+                      (double) InitiatingHeapOccupancyPercent);
+
         // Note: this might have already been set, if during the last
         // pause we decided to start a cycle but at the beginning of
         // this pause we decided to postpone it. That's OK.
         set_initiate_conc_mark_if_possible();
+      } else {
+        ergo_verbose2(ErgoConcCycles,
+                  "do not request concurrent cycle initiation",
+                  ergo_format_reason("occupancy lower than previous occupancy")
+                  ergo_format_byte("occupancy")
+                  ergo_format_byte("previous occupancy"),
+                  cur_used_bytes,
+                  _prev_collection_pause_used_at_end_bytes);
       }
     }
-
-    _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
   }
 
+  _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
+
   _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
                           end_time_sec, false);
 
@@ -1247,14 +1285,52 @@
 
   _n_pauses++;
 
+  double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
+  double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
+  double update_rs_time = avg_value(_par_last_update_rs_times_ms);
+  double update_rs_processed_buffers =
+    sum_of_values(_par_last_update_rs_processed_buffers);
+  double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
+  double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
+  double termination_time = avg_value(_par_last_termination_times_ms);
+
+  double parallel_known_time = update_rs_time +
+                               ext_root_scan_time +
+                               mark_stack_scan_time +
+                               scan_rs_time +
+                               obj_copy_time +
+                               termination_time;
+
+  double parallel_other_time = _cur_collection_par_time_ms - parallel_known_time;
+
+  PauseSummary* summary = _summary;
+
   if (update_stats) {
-    _recent_CH_strong_roots_times_ms->add(_cur_CH_strong_roots_dur_ms);
-    _recent_G1_strong_roots_times_ms->add(_cur_G1_strong_roots_dur_ms);
-    _recent_evac_times_ms->add(evac_ms);
+    _recent_rs_scan_times_ms->add(scan_rs_time);
     _recent_pause_times_ms->add(elapsed_ms);
-
     _recent_rs_sizes->add(rs_size);
 
+    MainBodySummary* body_summary = summary->main_body_summary();
+    guarantee(body_summary != NULL, "should not be null!");
+
+    if (_satb_drain_time_set)
+      body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
+    else
+      body_summary->record_satb_drain_time_ms(0.0);
+
+    body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
+    body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
+    body_summary->record_update_rs_time_ms(update_rs_time);
+    body_summary->record_scan_rs_time_ms(scan_rs_time);
+    body_summary->record_obj_copy_time_ms(obj_copy_time);
+    if (parallel) {
+      body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
+      body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
+      body_summary->record_termination_time_ms(termination_time);
+      body_summary->record_parallel_other_time_ms(parallel_other_time);
+    }
+    body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);
+
     // We exempt parallel collection from this check because Alloc Buffer
     // fragmentation can produce negative collections.  Same with evac
     // failure.
@@ -1319,56 +1395,12 @@
     gclog_or_tty->print_cr("   Recording collection pause(%d)", _n_pauses);
   }
 
-  PauseSummary* summary = _summary;
-
-  double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
-  double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
-  double update_rs_time = avg_value(_par_last_update_rs_times_ms);
-  double update_rs_processed_buffers =
-    sum_of_values(_par_last_update_rs_processed_buffers);
-  double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
-  double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
-  double termination_time = avg_value(_par_last_termination_times_ms);
-
-  double parallel_other_time = _cur_collection_par_time_ms -
-    (update_rs_time + ext_root_scan_time + mark_stack_scan_time +
-     scan_rs_time + obj_copy_time + termination_time);
-  if (update_stats) {
-    MainBodySummary* body_summary = summary->main_body_summary();
-    guarantee(body_summary != NULL, "should not be null!");
-
-    if (_satb_drain_time_set)
-      body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
-    else
-      body_summary->record_satb_drain_time_ms(0.0);
-    body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
-    body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
-    body_summary->record_update_rs_time_ms(update_rs_time);
-    body_summary->record_scan_rs_time_ms(scan_rs_time);
-    body_summary->record_obj_copy_time_ms(obj_copy_time);
-    if (parallel) {
-      body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
-      body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
-      body_summary->record_termination_time_ms(termination_time);
-      body_summary->record_parallel_other_time_ms(parallel_other_time);
-    }
-    body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);
-  }
-
   if (G1PolicyVerbose > 1) {
     gclog_or_tty->print_cr("      ET: %10.6f ms           (avg: %10.6f ms)\n"
-                           "        CH Strong: %10.6f ms    (avg: %10.6f ms)\n"
-                           "        G1 Strong: %10.6f ms    (avg: %10.6f ms)\n"
-                           "        Evac:      %10.6f ms    (avg: %10.6f ms)\n"
                            "       ET-RS:  %10.6f ms      (avg: %10.6f ms)\n"
                            "      |RS|: " SIZE_FORMAT,
                            elapsed_ms, recent_avg_time_for_pauses_ms(),
-                           _cur_CH_strong_roots_dur_ms, recent_avg_time_for_CH_strong_ms(),
-                           _cur_G1_strong_roots_dur_ms, recent_avg_time_for_G1_strong_ms(),
-                           evac_ms, recent_avg_time_for_evac_ms(),
-                           scan_rs_time,
-                           recent_avg_time_for_pauses_ms() -
-                           recent_avg_time_for_G1_strong_ms(),
+                           scan_rs_time, recent_avg_time_for_rs_scan_ms(),
                            rs_size);
 
     gclog_or_tty->print_cr("       Used at start: " SIZE_FORMAT"K"
@@ -1429,7 +1461,7 @@
       }
       print_par_stats(2, "GC Worker Times", _par_last_gc_worker_times_ms);
 
-      print_stats(2, "Other", parallel_other_time);
+      print_stats(2, "Parallel Other", parallel_other_time);
       print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
     } else {
       print_stats(1, "Update RS", update_rs_time);
@@ -1451,6 +1483,8 @@
 #endif
     print_stats(1, "Other", other_time_ms);
     print_stats(2, "Choose CSet", _recorded_young_cset_choice_time_ms);
+    print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
+    print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
 
     for (int i = 0; i < _aux_num; ++i) {
       if (_cur_aux_times_set[i]) {
@@ -1460,14 +1494,6 @@
       }
     }
   }
-  if (PrintGCDetails)
-    gclog_or_tty->print("   [");
-  if (PrintGC || PrintGCDetails)
-    _g1->print_size_transition(gclog_or_tty,
-                               _cur_collection_pause_used_at_start_bytes,
-                               _g1->used(), _g1->capacity());
-  if (PrintGCDetails)
-    gclog_or_tty->print_cr("]");
 
   _all_pause_times_ms->add(elapsed_ms);
   if (update_stats) {
@@ -1498,24 +1524,58 @@
     new_in_marking_window_im = true;
   }
 
-  if (in_young_gc_mode()) {
-    if (_last_full_young_gc) {
+  if (_last_full_young_gc) {
+    if (!last_pause_included_initial_mark) {
+      ergo_verbose2(ErgoPartiallyYoungGCs,
+                    "start partially-young GCs",
+                    ergo_format_byte_perc("known garbage"),
+                    _known_garbage_bytes, _known_garbage_ratio * 100.0);
       set_full_young_gcs(false);
-      _last_full_young_gc = false;
+    } else {
+      ergo_verbose0(ErgoPartiallyYoungGCs,
+                    "do not start partially-young GCs",
+                    ergo_format_reason("concurrent cycle is about to start"));
     }
-
-    if ( !_last_young_gc_full ) {
-      if ( _should_revert_to_full_young_gcs ||
-           _known_garbage_ratio < 0.05 ||
-           (adaptive_young_list_length() &&
-           (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) ) {
-        set_full_young_gcs(true);
-      }
+    _last_full_young_gc = false;
+  }
+
+  if ( !_last_young_gc_full ) {
+    if (_should_revert_to_full_young_gcs) {
+      ergo_verbose2(ErgoPartiallyYoungGCs,
+                    "end partially-young GCs",
+                    ergo_format_reason("partially-young GCs end requested")
+                    ergo_format_byte_perc("known garbage"),
+                    _known_garbage_bytes, _known_garbage_ratio * 100.0);
+      set_full_young_gcs(true);
+    } else if (_known_garbage_ratio < 0.05) {
+      ergo_verbose3(ErgoPartiallyYoungGCs,
+               "end partially-young GCs",
+               ergo_format_reason("known garbage percent lower than threshold")
+               ergo_format_byte_perc("known garbage")
+               ergo_format_perc("threshold"),
+               _known_garbage_bytes, _known_garbage_ratio * 100.0,
+               0.05 * 100.0);
+      set_full_young_gcs(true);
+    } else if (adaptive_young_list_length() &&
+              (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) {
+      ergo_verbose5(ErgoPartiallyYoungGCs,
+                    "end partially-young GCs",
+                    ergo_format_reason("current GC efficiency lower than "
+                                       "predicted fully-young GC efficiency")
+                    ergo_format_double("GC efficiency factor")
+                    ergo_format_double("current GC efficiency")
+                    ergo_format_double("predicted fully-young GC efficiency")
+                    ergo_format_byte_perc("known garbage"),
+                    get_gc_eff_factor(), cur_efficiency,
+                    predict_young_gc_eff(),
+                    _known_garbage_bytes, _known_garbage_ratio * 100.0);
+      set_full_young_gcs(true);
     }
-    _should_revert_to_full_young_gcs = false;
-
-    if (_last_young_gc_full && !_during_marking)
-      _young_gc_eff_seq->add(cur_efficiency);
+  }
+  _should_revert_to_full_young_gcs = false;
+
+  if (_last_young_gc_full && !_during_marking) {
+    _young_gc_eff_seq->add(cur_efficiency);
   }
 
   _short_lived_surv_rate_group->start_adding_regions();
@@ -1599,8 +1659,8 @@
 
     double survival_ratio = 0.0;
     if (_bytes_in_collection_set_before_gc > 0) {
-      survival_ratio = (double) bytes_in_to_space_during_gc() /
-        (double) _bytes_in_collection_set_before_gc;
+      survival_ratio = (double) _bytes_copied_during_gc /
+                                   (double) _bytes_in_collection_set_before_gc;
     }
 
     _pending_cards_seq->add((double) _pending_cards);
@@ -1663,8 +1723,7 @@
   _in_marking_window = new_in_marking_window;
   _in_marking_window_im = new_in_marking_window_im;
   _free_regions_at_end_of_collection = _g1->free_regions();
-  calculate_young_list_min_length();
-  calculate_young_list_target_length();
+  update_young_list_target_length();
 
   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
@@ -1672,6 +1731,46 @@
   // </NEW PREDICTION>
 }
 
+#define EXT_SIZE_FORMAT "%d%s"
+#define EXT_SIZE_PARAMS(bytes)                                  \
+  byte_size_in_proper_unit((bytes)),                            \
+  proper_unit_for_byte_size((bytes))
+
+void G1CollectorPolicy::print_heap_transition() {
+  if (PrintGCDetails) {
+    YoungList* young_list = _g1->young_list();
+    size_t eden_bytes = young_list->eden_used_bytes();
+    size_t survivor_bytes = young_list->survivor_used_bytes();
+    size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
+    size_t used = _g1->used();
+    size_t capacity = _g1->capacity();
+    size_t eden_capacity =
+      (_young_list_target_length * HeapRegion::GrainBytes) - survivor_bytes;
+
+    gclog_or_tty->print_cr(
+      "   [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
+      "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
+      "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
+      EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
+      EXT_SIZE_PARAMS(_eden_bytes_before_gc),
+      EXT_SIZE_PARAMS(_prev_eden_capacity),
+      EXT_SIZE_PARAMS(eden_bytes),
+      EXT_SIZE_PARAMS(eden_capacity),
+      EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
+      EXT_SIZE_PARAMS(survivor_bytes),
+      EXT_SIZE_PARAMS(used_before_gc),
+      EXT_SIZE_PARAMS(_capacity_before_gc),
+      EXT_SIZE_PARAMS(used),
+      EXT_SIZE_PARAMS(capacity));
+
+    _prev_eden_capacity = eden_capacity;
+  } else if (PrintGC) {
+    _g1->print_size_transition(gclog_or_tty,
+                               _cur_collection_pause_used_at_start_bytes,
+                               _g1->used(), _g1->capacity());
+  }
+}
+
 // <NEW PREDICTION>
 
 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
@@ -1906,18 +2005,14 @@
   // I don't think we need to do this when in young GC mode since
   // marking will be initiated next time we hit the soft limit anyway...
   if (predicted_time_ms > _expensive_region_limit_ms) {
-    if (!in_young_gc_mode()) {
-        set_full_young_gcs(true);
-        // We might want to do something different here. However,
-        // right now we don't support the non-generational G1 mode
-        // (and in fact we are planning to remove the associated code,
-        // see CR 6814390). So, let's leave it as is and this will be
-        // removed some time in the future
-        ShouldNotReachHere();
-        set_during_initial_mark_pause();
-    } else
-      // no point in doing another partial one
-      _should_revert_to_full_young_gcs = true;
+    ergo_verbose2(ErgoPartiallyYoungGCs,
+              "request partially-young GCs end",
+              ergo_format_reason("predicted region time higher than threshold")
+              ergo_format_ms("predicted region time")
+              ergo_format_ms("threshold"),
+              predicted_time_ms, _expensive_region_limit_ms);
+    // no point in doing another partial one
+    _should_revert_to_full_young_gcs = true;
   }
 }
 
@@ -1932,38 +2027,27 @@
 }
 
 double G1CollectorPolicy::recent_avg_time_for_pauses_ms() {
-  if (_recent_pause_times_ms->num() == 0) return (double) MaxGCPauseMillis;
-  else return _recent_pause_times_ms->avg();
-}
-
-double G1CollectorPolicy::recent_avg_time_for_CH_strong_ms() {
-  if (_recent_CH_strong_roots_times_ms->num() == 0)
-    return (double)MaxGCPauseMillis/3.0;
-  else return _recent_CH_strong_roots_times_ms->avg();
+  if (_recent_pause_times_ms->num() == 0) {
+    return (double) MaxGCPauseMillis;
+  }
+  return _recent_pause_times_ms->avg();
 }
 
-double G1CollectorPolicy::recent_avg_time_for_G1_strong_ms() {
-  if (_recent_G1_strong_roots_times_ms->num() == 0)
+double G1CollectorPolicy::recent_avg_time_for_rs_scan_ms() {
+  if (_recent_rs_scan_times_ms->num() == 0) {
     return (double)MaxGCPauseMillis/3.0;
-  else return _recent_G1_strong_roots_times_ms->avg();
-}
-
-double G1CollectorPolicy::recent_avg_time_for_evac_ms() {
-  if (_recent_evac_times_ms->num() == 0) return (double)MaxGCPauseMillis/3.0;
-  else return _recent_evac_times_ms->avg();
+  }
+  return _recent_rs_scan_times_ms->avg();
 }
 
 int G1CollectorPolicy::number_of_recent_gcs() {
-  assert(_recent_CH_strong_roots_times_ms->num() ==
-         _recent_G1_strong_roots_times_ms->num(), "Sequence out of sync");
-  assert(_recent_G1_strong_roots_times_ms->num() ==
-         _recent_evac_times_ms->num(), "Sequence out of sync");
-  assert(_recent_evac_times_ms->num() ==
+  assert(_recent_rs_scan_times_ms->num() ==
          _recent_pause_times_ms->num(), "Sequence out of sync");
   assert(_recent_pause_times_ms->num() ==
          _recent_CS_bytes_used_before->num(), "Sequence out of sync");
   assert(_recent_CS_bytes_used_before->num() ==
          _recent_CS_bytes_surviving->num(), "Sequence out of sync");
+
   return _recent_pause_times_ms->num();
 }
 
@@ -2036,7 +2120,9 @@
 }
 
 size_t G1CollectorPolicy::expansion_amount() {
-  if ((recent_avg_pause_time_ratio() * 100.0) > _gc_overhead_perc) {
+  double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
+  double threshold = _gc_overhead_perc;
+  if (recent_gc_overhead > threshold) {
     // We will double the existing space, or take
     // G1ExpandByPercentOfAvailable % of the available expansion
     // space, whichever is smaller, bounded below by a minimum
@@ -2051,20 +2137,19 @@
     expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
     expand_bytes = MAX2(expand_bytes, min_expand_bytes);
     expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
-    if (G1PolicyVerbose > 1) {
-      gclog_or_tty->print("Decided to expand: ratio = %5.2f, "
-                 "committed = %d%s, uncommited = %d%s, via pct = %d%s.\n"
-                 "                   Answer = %d.\n",
-                 recent_avg_pause_time_ratio(),
-                 byte_size_in_proper_unit(committed_bytes),
-                 proper_unit_for_byte_size(committed_bytes),
-                 byte_size_in_proper_unit(uncommitted_bytes),
-                 proper_unit_for_byte_size(uncommitted_bytes),
-                 byte_size_in_proper_unit(expand_bytes_via_pct),
-                 proper_unit_for_byte_size(expand_bytes_via_pct),
-                 byte_size_in_proper_unit(expand_bytes),
-                 proper_unit_for_byte_size(expand_bytes));
-    }
+
+    ergo_verbose5(ErgoHeapSizing,
+                  "attempt heap expansion",
+                  ergo_format_reason("recent GC overhead higher than "
+                                     "threshold after GC")
+                  ergo_format_perc("recent GC overhead")
+                  ergo_format_perc("threshold")
+                  ergo_format_byte("uncommitted")
+                  ergo_format_byte_perc("calculated expansion amount"),
+                  recent_gc_overhead, threshold,
+                  uncommitted_bytes,
+                  expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
+
     return expand_bytes;
   } else {
     return 0;
@@ -2287,8 +2372,7 @@
 #endif // PRODUCT
 }
 
-void
-G1CollectorPolicy::update_region_num(bool young) {
+void G1CollectorPolicy::update_region_num(bool young) {
   if (young) {
     ++_region_num_young;
   } else {
@@ -2320,7 +2404,7 @@
   };
 }
 
-void G1CollectorPolicy::calculate_max_gc_locker_expansion() {
+void G1CollectorPolicy::update_max_gc_locker_expansion() {
   size_t expansion_region_num = 0;
   if (GCLockerEdenExpansionPercent > 0) {
     double perc = (double) GCLockerEdenExpansionPercent / 100.0;
@@ -2336,20 +2420,15 @@
 }
 
 // Calculates survivor space parameters.
-void G1CollectorPolicy::calculate_survivors_policy()
-{
-  if (G1FixedSurvivorSpaceSize == 0) {
-    _max_survivor_regions = _young_list_target_length / SurvivorRatio;
-  } else {
-    _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
-  }
-
-  if (G1FixedTenuringThreshold) {
-    _tenuring_threshold = MaxTenuringThreshold;
-  } else {
-    _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
+void G1CollectorPolicy::update_survivors_policy() {
+  double max_survivor_regions_d =
+                 (double) _young_list_target_length / (double) SurvivorRatio;
+  // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
+  // smaller than 1.0) we'll get 1.
+  _max_survivor_regions = (size_t) ceil(max_survivor_regions_d);
+
+  _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
         HeapRegion::GrainWords * _max_survivor_regions);
-  }
 }
 
 #ifndef PRODUCT
@@ -2374,13 +2453,23 @@
 }
 #endif
 
-bool
-G1CollectorPolicy::force_initial_mark_if_outside_cycle() {
+bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
+                                                     GCCause::Cause gc_cause) {
   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
   if (!during_cycle) {
+    ergo_verbose1(ErgoConcCycles,
+                  "request concurrent cycle initiation",
+                  ergo_format_reason("requested by GC cause")
+                  ergo_format_str("GC cause"),
+                  GCCause::to_string(gc_cause));
     set_initiate_conc_mark_if_possible();
     return true;
   } else {
+    ergo_verbose1(ErgoConcCycles,
+                  "do not request concurrent cycle initiation",
+                  ergo_format_reason("concurrent cycle already in progress")
+                  ergo_format_str("GC cause"),
+                  GCCause::to_string(gc_cause));
     return false;
   }
 }
@@ -2408,10 +2497,21 @@
       // initiate a new cycle.
 
       set_during_initial_mark_pause();
+      // We do not allow non-full young GCs during marking.
+      if (!full_young_gcs()) {
+        set_full_young_gcs(true);
+        ergo_verbose0(ErgoPartiallyYoungGCs,
+                      "end partially-young GCs",
+                      ergo_format_reason("concurrent cycle is about to start"));
+      }
 
       // And we can now clear initiate_conc_mark_if_possible() as
       // we've already acted on it.
       clear_initiate_conc_mark_if_possible();
+
+      ergo_verbose0(ErgoConcCycles,
+                  "initiate concurrent cycle",
+                  ergo_format_reason("concurrent cycle initiation requested"));
     } else {
       // The concurrent marking thread is still finishing up the
       // previous cycle. If we start one right now the two cycles
@@ -2425,6 +2525,9 @@
       // and, if it's in a yield point, it's waiting for us to
       // finish. So, at this point we will not start a cycle and we'll
       // let the concurrent marking thread complete the last one.
+      ergo_verbose0(ErgoConcCycles,
+                    "do not initiate concurrent cycle",
+                    ergo_format_reason("concurrent cycle already in progress"));
     }
   }
 }
@@ -2435,21 +2538,6 @@
   G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used);
 }
 
-class NextNonCSElemFinder: public HeapRegionClosure {
-  HeapRegion* _res;
-public:
-  NextNonCSElemFinder(): _res(NULL) {}
-  bool doHeapRegion(HeapRegion* r) {
-    if (!r->in_collection_set()) {
-      _res = r;
-      return true;
-    } else {
-      return false;
-    }
-  }
-  HeapRegion* res() { return _res; }
-};
-
 class KnownGarbageClosure: public HeapRegionClosure {
   CollectionSetChooser* _hrSorted;
 
@@ -2618,14 +2706,6 @@
   assert(_inc_cset_build_state == Active, "Precondition");
   assert(!hr->is_young(), "non-incremental add of young region");
 
-  if (G1PrintHeapRegions) {
-    gclog_or_tty->print_cr("added region to cset "
-                           "%d:["PTR_FORMAT", "PTR_FORMAT"], "
-                           "top "PTR_FORMAT", %s",
-                           hr->hrs_index(), hr->bottom(), hr->end(),
-                           hr->top(), hr->is_young() ? "YOUNG" : "NOT_YOUNG");
-  }
-
   if (_g1->mark_in_progress())
     _g1->concurrent_mark()->registerCSetRegion(hr);
 
@@ -2647,9 +2727,7 @@
   _inc_cset_size = 0;
   _inc_cset_bytes_used_before = 0;
 
-  if (in_young_gc_mode()) {
-    _inc_cset_young_index = 0;
-  }
+  _inc_cset_young_index = 0;
 
   _inc_cset_max_finger = 0;
   _inc_cset_recorded_young_bytes = 0;
@@ -2791,14 +2869,6 @@
     _inc_cset_tail->set_next_in_collection_set(hr);
   }
   _inc_cset_tail = hr;
-
-  if (G1PrintHeapRegions) {
-    gclog_or_tty->print_cr(" added region to incremental cset (RHS) "
-                  "%d:["PTR_FORMAT", "PTR_FORMAT"], "
-                  "top "PTR_FORMAT", young %s",
-                  hr->hrs_index(), hr->bottom(), hr->end(),
-                  hr->top(), (hr->is_young()) ? "YES" : "NO");
-  }
 }
 
 // Add the region to the LHS of the incremental cset
@@ -2816,14 +2886,6 @@
     _inc_cset_tail = hr;
   }
   _inc_cset_head = hr;
-
-  if (G1PrintHeapRegions) {
-    gclog_or_tty->print_cr(" added region to incremental cset (LHS) "
-                  "%d:["PTR_FORMAT", "PTR_FORMAT"], "
-                  "top "PTR_FORMAT", young %s",
-                  hr->hrs_index(), hr->bottom(), hr->end(),
-                  hr->top(), (hr->is_young()) ? "YES" : "NO");
-  }
 }
 
 #ifndef PRODUCT
@@ -2856,6 +2918,8 @@
   // Set this here - in case we're not doing young collections.
   double non_young_start_time_sec = os::elapsedTime();
 
+  YoungList* young_list = _g1->young_list();
+
   start_recording_regions();
 
   guarantee(target_pause_time_ms > 0.0,
@@ -2868,116 +2932,106 @@
 
   double time_remaining_ms = target_pause_time_ms - base_time_ms;
 
+  ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
+                "start choosing CSet",
+                ergo_format_ms("predicted base time")
+                ergo_format_ms("remaining time")
+                ergo_format_ms("target pause time"),
+                base_time_ms, time_remaining_ms, target_pause_time_ms);
+
   // the 10% and 50% values are arbitrary...
-  if (time_remaining_ms < 0.10 * target_pause_time_ms) {
+  double threshold = 0.10 * target_pause_time_ms;
+  if (time_remaining_ms < threshold) {
+    double prev_time_remaining_ms = time_remaining_ms;
     time_remaining_ms = 0.50 * target_pause_time_ms;
     _within_target = false;
+    ergo_verbose3(ErgoCSetConstruction,
+                  "adjust remaining time",
+                  ergo_format_reason("remaining time lower than threshold")
+                  ergo_format_ms("remaining time")
+                  ergo_format_ms("threshold")
+                  ergo_format_ms("adjusted remaining time"),
+                  prev_time_remaining_ms, threshold, time_remaining_ms);
   } else {
     _within_target = true;
   }
 
-  // We figure out the number of bytes available for future to-space.
-  // For new regions without marking information, we must assume the
-  // worst-case of complete survival.  If we have marking information for a
-  // region, we can bound the amount of live data.  We can add a number of
-  // such regions, as long as the sum of the live data bounds does not
-  // exceed the available evacuation space.
-  size_t max_live_bytes = _g1->free_regions() * HeapRegion::GrainBytes;
-
-  size_t expansion_bytes =
-    _g1->expansion_regions() * HeapRegion::GrainBytes;
+  size_t expansion_bytes = _g1->expansion_regions() * HeapRegion::GrainBytes;
+
+  HeapRegion* hr;
+  double young_start_time_sec = os::elapsedTime();
 
   _collection_set_bytes_used_before = 0;
   _collection_set_size = 0;
-
-  // Adjust for expansion and slop.
-  max_live_bytes = max_live_bytes + expansion_bytes;
-
-  HeapRegion* hr;
-  if (in_young_gc_mode()) {
-    double young_start_time_sec = os::elapsedTime();
-
-    if (G1PolicyVerbose > 0) {
-      gclog_or_tty->print_cr("Adding %d young regions to the CSet",
-                    _g1->young_list()->length());
-    }
-
-    _young_cset_length  = 0;
-    _last_young_gc_full = full_young_gcs() ? true : false;
-
-    if (_last_young_gc_full)
-      ++_full_young_pause_num;
-    else
-      ++_partial_young_pause_num;
-
-    // The young list is laid with the survivor regions from the previous
-    // pause are appended to the RHS of the young list, i.e.
-    //   [Newly Young Regions ++ Survivors from last pause].
-
-    hr = _g1->young_list()->first_survivor_region();
-    while (hr != NULL) {
-      assert(hr->is_survivor(), "badly formed young list");
-      hr->set_young();
-      hr = hr->get_next_young_region();
-    }
-
-    // Clear the fields that point to the survivor list - they are
-    // all young now.
-    _g1->young_list()->clear_survivors();
-
-    if (_g1->mark_in_progress())
-      _g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger);
-
-    _young_cset_length = _inc_cset_young_index;
-    _collection_set = _inc_cset_head;
-    _collection_set_size = _inc_cset_size;
-    _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
-
-    // For young regions in the collection set, we assume the worst
-    // case of complete survival
-    max_live_bytes -= _inc_cset_size * HeapRegion::GrainBytes;
-
-    time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
-    predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
-
-    // The number of recorded young regions is the incremental
-    // collection set's current size
-    set_recorded_young_regions(_inc_cset_size);
-    set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
-    set_recorded_young_bytes(_inc_cset_recorded_young_bytes);
+  _young_cset_length  = 0;
+  _last_young_gc_full = full_young_gcs() ? true : false;
+
+  if (_last_young_gc_full) {
+    ++_full_young_pause_num;
+  } else {
+    ++_partial_young_pause_num;
+  }
+
+  // The young list is laid out with the survivor regions from the
+  // previous pause appended to the RHS of the young list, i.e.
+  //   [Newly Young Regions ++ Survivors from last pause].
+
+  size_t survivor_region_num = young_list->survivor_length();
+  size_t eden_region_num = young_list->length() - survivor_region_num;
+  size_t old_region_num = 0;
+  hr = young_list->first_survivor_region();
+  while (hr != NULL) {
+    assert(hr->is_survivor(), "badly formed young list");
+    hr->set_young();
+    hr = hr->get_next_young_region();
+  }
+
+  // Clear the fields that point to the survivor list - they are all young now.
+  young_list->clear_survivors();
+
+  if (_g1->mark_in_progress())
+    _g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger);
+
+  _young_cset_length = _inc_cset_young_index;
+  _collection_set = _inc_cset_head;
+  _collection_set_size = _inc_cset_size;
+  _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
+  time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
+  predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
+
+  ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
+                "add young regions to CSet",
+                ergo_format_region("eden")
+                ergo_format_region("survivors")
+                ergo_format_ms("predicted young region time"),
+                eden_region_num, survivor_region_num,
+                _inc_cset_predicted_elapsed_time_ms);
+
+  // The number of recorded young regions is the incremental
+  // collection set's current size
+  set_recorded_young_regions(_inc_cset_size);
+  set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
+  set_recorded_young_bytes(_inc_cset_recorded_young_bytes);
 #if PREDICTIONS_VERBOSE
-    set_predicted_bytes_to_copy(_inc_cset_predicted_bytes_to_copy);
+  set_predicted_bytes_to_copy(_inc_cset_predicted_bytes_to_copy);
 #endif // PREDICTIONS_VERBOSE
 
-    if (G1PolicyVerbose > 0) {
-      gclog_or_tty->print_cr("  Added " PTR_FORMAT " Young Regions to CS.",
-                             _inc_cset_size);
-      gclog_or_tty->print_cr("    (" SIZE_FORMAT " KB left in heap.)",
-                            max_live_bytes/K);
-    }
-
-    assert(_inc_cset_size == _g1->young_list()->length(), "Invariant");
-
-    double young_end_time_sec = os::elapsedTime();
-    _recorded_young_cset_choice_time_ms =
-      (young_end_time_sec - young_start_time_sec) * 1000.0;
-
-    // We are doing young collections so reset this.
-    non_young_start_time_sec = young_end_time_sec;
-
-    // Note we can use either _collection_set_size or
-    // _young_cset_length here
-    if (_collection_set_size > 0 && _last_young_gc_full) {
-      // don't bother adding more regions...
-      goto choose_collection_set_end;
-    }
-  }
-
-  if (!in_young_gc_mode() || !full_young_gcs()) {
+  assert(_inc_cset_size == young_list->length(), "Invariant");
+
+  double young_end_time_sec = os::elapsedTime();
+  _recorded_young_cset_choice_time_ms =
+    (young_end_time_sec - young_start_time_sec) * 1000.0;
+
+  // We are doing young collections so reset this.
+  non_young_start_time_sec = young_end_time_sec;
+
+  if (!full_young_gcs()) {
     bool should_continue = true;
     NumberSeq seq;
     double avg_prediction = 100000000000000000.0; // something very large
 
+    size_t prev_collection_set_size = _collection_set_size;
+    double prev_predicted_pause_time_ms = predicted_pause_time_ms;
     do {
       hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
                                                       avg_prediction);
@@ -2987,32 +3041,76 @@
         predicted_pause_time_ms += predicted_time_ms;
         add_to_collection_set(hr);
         record_non_young_cset_region(hr);
-        max_live_bytes -= MIN2(hr->max_live_bytes(), max_live_bytes);
-        if (G1PolicyVerbose > 0) {
-          gclog_or_tty->print_cr("    (" SIZE_FORMAT " KB left in heap.)",
-                        max_live_bytes/K);
-        }
         seq.add(predicted_time_ms);
         avg_prediction = seq.avg() + seq.sd();
       }
-      should_continue =
-        ( hr != NULL) &&
-        ( (adaptive_young_list_length()) ? time_remaining_ms > 0.0
-          : _collection_set_size < _young_list_fixed_length );
+
+      should_continue = true;
+      if (hr == NULL) {
+        // No need for an ergo verbose message here,
+        // getNextMarkedRegion() does this when it returns NULL.
+        should_continue = false;
+      } else {
+        if (adaptive_young_list_length()) {
+          if (time_remaining_ms < 0.0) {
+            ergo_verbose1(ErgoCSetConstruction,
+                          "stop adding old regions to CSet",
+                          ergo_format_reason("remaining time is lower than 0")
+                          ergo_format_ms("remaining time"),
+                          time_remaining_ms);
+            should_continue = false;
+          }
+        } else {
+          if (_collection_set_size >= _young_list_fixed_length) {
+            ergo_verbose2(ErgoCSetConstruction,
+                          "stop adding old regions to CSet",
+                          ergo_format_reason("CSet length reached target")
+                          ergo_format_region("CSet")
+                          ergo_format_region("young target"),
+                          _collection_set_size, _young_list_fixed_length);
+            should_continue = false;
+          }
+        }
+      }
     } while (should_continue);
 
     if (!adaptive_young_list_length() &&
-        _collection_set_size < _young_list_fixed_length)
+        _collection_set_size < _young_list_fixed_length) {
+      ergo_verbose2(ErgoCSetConstruction,
+                    "request partially-young GCs end",
+                    ergo_format_reason("CSet length lower than target")
+                    ergo_format_region("CSet")
+                    ergo_format_region("young target"),
+                    _collection_set_size, _young_list_fixed_length);
       _should_revert_to_full_young_gcs  = true;
+    }
+
+    old_region_num = _collection_set_size - prev_collection_set_size;
+
+    ergo_verbose2(ErgoCSetConstruction | ErgoHigh,
+                  "add old regions to CSet",
+                  ergo_format_region("old")
+                  ergo_format_ms("predicted old region time"),
+                  old_region_num,
+                  predicted_pause_time_ms - prev_predicted_pause_time_ms);
   }
 
-choose_collection_set_end:
   stop_incremental_cset_building();
 
   count_CS_bytes_used();
 
   end_recording_regions();
 
+  ergo_verbose5(ErgoCSetConstruction,
+                "finish choosing CSet",
+                ergo_format_region("eden")
+                ergo_format_region("survivors")
+                ergo_format_region("old")
+                ergo_format_ms("predicted pause time")
+                ergo_format_ms("target pause time"),
+                eden_region_num, survivor_region_num, old_region_num,
+                predicted_pause_time_ms, target_pause_time_ms);
+
   double non_young_end_time_sec = os::elapsedTime();
   _recorded_non_young_cset_choice_time_ms =
     (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
@@ -3024,12 +3122,6 @@
 }
 
 void G1CollectorPolicy_BestRegionsFirst::
-expand_if_possible(size_t numRegions) {
-  size_t expansion_bytes = numRegions * HeapRegion::GrainBytes;
-  _g1->expand(expansion_bytes);
-}
-
-void G1CollectorPolicy_BestRegionsFirst::
 record_collection_pause_end() {
   G1CollectorPolicy::record_collection_pause_end();
   assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
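
A minimal sketch (not part of the changeset) of the stop test that the partially-young loop above applies while pulling old (marked) regions from the CollectionSetChooser: with an adaptive young list it keeps going while pause-time budget remains, otherwise it stops once the CSet reaches the fixed young-list length. The helper name and parameters below are illustrative, not HotSpot APIs.

static bool keep_adding_old_regions(bool adaptive_young_list,
                                    double time_remaining_ms,
                                    size_t cset_length,
                                    size_t young_list_fixed_length) {
  if (adaptive_young_list) {
    // Stop once the remaining pause-time budget is exhausted.
    return time_remaining_ms >= 0.0;
  }
  // Stop once the CSet has reached the fixed young-list target length.
  return cset_length < young_list_fixed_length;
}
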
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -119,6 +119,8 @@
   double _cur_satb_drain_time_ms;
   double _cur_clear_ct_time_ms;
   bool   _satb_drain_time_set;
+  double _cur_ref_proc_time_ms;
+  double _cur_ref_enq_time_ms;
 
 #ifndef PRODUCT
   // Card Table Count Cache stats
@@ -129,15 +131,9 @@
   jlong  _num_cc_clears;                // number of times the card count cache has been cleared
 #endif
 
-  double _cur_CH_strong_roots_end_sec;
-  double _cur_CH_strong_roots_dur_ms;
-  double _cur_G1_strong_roots_end_sec;
-  double _cur_G1_strong_roots_dur_ms;
+  // Statistics for recent GC pauses.  See below for how indexed.
+  TruncatedSeq* _recent_rs_scan_times_ms;
 
-  // Statistics for recent GC pauses.  See below for how indexed.
-  TruncatedSeq* _recent_CH_strong_roots_times_ms;
-  TruncatedSeq* _recent_G1_strong_roots_times_ms;
-  TruncatedSeq* _recent_evac_times_ms;
   // These exclude marking times.
   TruncatedSeq* _recent_pause_times_ms;
   TruncatedSeq* _recent_gc_times_ms;
@@ -147,7 +143,6 @@
 
   TruncatedSeq* _recent_rs_sizes;
 
-  TruncatedSeq* _concurrent_mark_init_times_ms;
   TruncatedSeq* _concurrent_mark_remark_times_ms;
   TruncatedSeq* _concurrent_mark_cleanup_times_ms;
 
@@ -184,18 +179,15 @@
   double* _par_last_gc_worker_end_times_ms;
   double* _par_last_gc_worker_times_ms;
 
-  // indicates that we are in young GC mode
-  bool _in_young_gc_mode;
-
   // indicates whether we are in full young or partially young GC mode
   bool _full_young_gcs;
 
   // if true, then it tries to dynamically adjust the length of the
   // young list
   bool _adaptive_young_list_length;
-  size_t _young_list_min_length;
   size_t _young_list_target_length;
   size_t _young_list_fixed_length;
+  size_t _prev_eden_capacity; // used for logging
 
   // The max number of regions we can extend the eden by while the GC
   // locker is active. This should be >= _young_list_target_length;
@@ -217,6 +209,9 @@
 
   double                _gc_overhead_perc;
 
+  double _reserve_factor;
+  size_t _reserve_regions;
+
   bool during_marking() {
     return _during_marking;
   }
@@ -253,6 +248,10 @@
 
   TruncatedSeq* _max_conc_overhead_seq;
 
+  bool   _using_new_ratio_calculations;
+  size_t _min_desired_young_length; // as set on the command line or by default calculations
+  size_t _max_desired_young_length; // as set on the command line or by default calculations
+
   size_t _recorded_young_regions;
   size_t _recorded_non_young_regions;
   size_t _recorded_region_num;
@@ -466,12 +465,6 @@
   size_t predict_bytes_to_copy(HeapRegion* hr);
   double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
 
-    // for use by: calculate_young_list_target_length(rs_length)
-  bool predict_will_fit(size_t young_region_num,
-                        double base_time_ms,
-                        size_t init_free_regions,
-                        double target_pause_time_ms);
-
   void start_recording_regions();
   void record_cset_region_info(HeapRegion* hr, bool young);
   void record_non_young_cset_region(HeapRegion* hr);
@@ -503,7 +496,6 @@
 
   // </NEW PREDICTION>
 
-public:
   void cset_regions_freed() {
     bool propagate = _last_young_gc_full && !_in_marking_window;
     _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
@@ -533,10 +525,6 @@
     return _mmu_tracker->max_gc_time() * 1000.0;
   }
 
-  double predict_init_time_ms() {
-    return get_new_prediction(_concurrent_mark_init_times_ms);
-  }
-
   double predict_remark_time_ms() {
     return get_new_prediction(_concurrent_mark_remark_times_ms);
   }
@@ -591,13 +579,9 @@
   int _last_update_rs_processed_buffers;
   double _last_pause_time_ms;
 
-  size_t _bytes_in_to_space_before_gc;
-  size_t _bytes_in_to_space_after_gc;
-  size_t bytes_in_to_space_during_gc() {
-    return
-      _bytes_in_to_space_after_gc - _bytes_in_to_space_before_gc;
-  }
   size_t _bytes_in_collection_set_before_gc;
+  size_t _bytes_copied_during_gc;
+
   // Used to count used bytes in CS.
   friend class CountCSClosure;
 
@@ -692,17 +676,11 @@
   // The average time in ms per collection pause, averaged over recent pauses.
   double recent_avg_time_for_pauses_ms();
 
-  // The average time in ms for processing CollectedHeap strong roots, per
-  // collection pause, averaged over recent pauses.
-  double recent_avg_time_for_CH_strong_ms();
-
-  // The average time in ms for processing the G1 remembered set, per
-  // pause, averaged over recent pauses.
-  double recent_avg_time_for_G1_strong_ms();
-
-  // The average time in ms for "evacuating followers", per pause, averaged
-  // over recent pauses.
-  double recent_avg_time_for_evac_ms();
+  // The average time in ms for RS scanning, per pause, averaged
+  // over recent pauses. (Note the RS scanning time for a pause
+  // is itself an average of the RS scanning time for each worker
+  // thread.)
+  double recent_avg_time_for_rs_scan_ms();
 
   // The number of "recent" GCs recorded in the number sequences
   int number_of_recent_gcs();
@@ -792,14 +770,45 @@
   // This set of variables tracks the collector efficiency, in order to
   // determine whether we should initiate a new marking.
   double _cur_mark_stop_world_time_ms;
-  double _mark_init_start_sec;
   double _mark_remark_start_sec;
   double _mark_cleanup_start_sec;
   double _mark_closure_time_ms;
 
-  void   calculate_young_list_min_length();
-  void   calculate_young_list_target_length();
-  void   calculate_young_list_target_length(size_t rs_lengths);
+  // Update the young list target length either by setting it to the
+  // desired fixed value or by calculating it using G1's pause
+  // prediction model. If no rs_lengths parameter is passed, predict
+  // the RS lengths using the prediction model, otherwise use the
+  // given rs_lengths as the prediction.
+  void update_young_list_target_length(size_t rs_lengths = (size_t) -1);
+
+  // Calculate and return the minimum desired young list target
+  // length. This is the minimum desired young list length according
+  // to the user's inputs.
+  size_t calculate_young_list_desired_min_length(size_t base_min_length);
+
+  // Calculate and return the maximum desired young list target
+  // length. This is the maximum desired young list length according
+  // to the user's inputs.
+  size_t calculate_young_list_desired_max_length();
+
+  // Calculate and return the maximum young list target length that
+  // can fit into the pause time goal. The parameters are: rs_lengths
+  // represent the prediction of how large the young RSet lengths will
+  // be, base_min_length is the already existing number of regions in
+  // the young list, and desired_min_length and desired_max_length are
+  // the desired min and max young list lengths according to the user's
+  // inputs.
+  size_t calculate_young_list_target_length(size_t rs_lengths,
+                                            size_t base_min_length,
+                                            size_t desired_min_length,
+                                            size_t desired_max_length);
+
+  // Check whether a given young length (young_length) fits into the
+  // given target pause time and whether the prediction for the amount
+  // of objects to be copied for the given length will fit into the
+  // given free space (expressed by base_free_regions).  It is used by
+  // calculate_young_list_target_length().
+  bool predict_will_fit(size_t young_length, double base_time_ms,
+                        size_t base_free_regions, double target_pause_time_ms);
 
 public:
 
@@ -811,20 +820,22 @@
     return CollectorPolicy::G1CollectorPolicyKind;
   }
 
-  void check_prediction_validity();
+  // Check the current value of the young list RSet lengths and
+  // compare it against the last prediction. If the current value is
+  // higher, recalculate the young list target length prediction.
+  void revise_young_list_target_length_if_necessary();
 
   size_t bytes_in_collection_set() {
     return _bytes_in_collection_set_before_gc;
   }
 
-  size_t bytes_in_to_space() {
-    return bytes_in_to_space_during_gc();
-  }
-
   unsigned calc_gc_alloc_time_stamp() {
     return _all_pause_times_ms->num() + 1;
   }
 
+  // This should be called after the heap is resized.
+  void record_new_heap_size(size_t new_number_of_regions);
+
 protected:
 
   // Count the number of bytes used in the CS.
@@ -836,6 +847,8 @@
                                                 size_t max_live_bytes);
   void record_concurrent_mark_cleanup_end_work2();
 
+  void update_young_list_size_using_newratio(size_t number_of_heap_regions);
+
 public:
 
   virtual void init();
@@ -869,9 +882,7 @@
                                              size_t start_used);
 
   // Must currently be called while the world is stopped.
-  virtual void record_concurrent_mark_init_start();
-  virtual void record_concurrent_mark_init_end();
-  void record_concurrent_mark_init_end_pre(double
+  void record_concurrent_mark_init_end(double
                                            mark_init_elapsed_time_ms);
 
   void record_mark_closure_time(double mark_closure_time_ms);
@@ -887,10 +898,8 @@
   virtual void record_concurrent_pause();
   virtual void record_concurrent_pause_end();
 
-  virtual void record_collection_pause_end_CH_strong_roots();
-  virtual void record_collection_pause_end_G1_strong_roots();
-
   virtual void record_collection_pause_end();
+  void print_heap_transition();
 
   // Record the fact that a full collection occurred.
   virtual void record_full_collection_start();
@@ -979,6 +988,14 @@
     _cur_aux_times_ms[i] += ms;
   }
 
+  void record_ref_proc_time(double ms) {
+    _cur_ref_proc_time_ms = ms;
+  }
+
+  void record_ref_enq_time(double ms) {
+    _cur_ref_enq_time_ms = ms;
+  }
+
 #ifndef PRODUCT
   void record_cc_clear_time(double ms) {
     if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms)
@@ -991,9 +1008,16 @@
   }
 #endif
 
-  // Record the fact that "bytes" bytes allocated in a region.
-  void record_before_bytes(size_t bytes);
-  void record_after_bytes(size_t bytes);
+  // Record how much space we copied during a GC. This is typically
+  // called when a GC alloc region is being retired.
+  void record_bytes_copied_during_gc(size_t bytes) {
+    _bytes_copied_during_gc += bytes;
+  }
+
+  // The amount of space we copied during a GC.
+  size_t bytes_copied_during_gc() {
+    return _bytes_copied_during_gc;
+  }
 
   // Choose a new collection set.  Marks the chosen regions as being
   // "in_collection_set", and links them together.  The head and number of
@@ -1071,7 +1095,7 @@
   // new cycle, as long as we are not already in one. It's best if it
   // is called during a safepoint when the test whether a cycle is in
   // progress or not is stable.
-  bool force_initial_mark_if_outside_cycle();
+  bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);
 
   // This is called at the very beginning of an evacuation pause (it
   // has to be the first thing that the pause does). If
@@ -1116,30 +1140,21 @@
   bool is_young_list_full() {
     size_t young_list_length = _g1->young_list()->length();
     size_t young_list_target_length = _young_list_target_length;
-    if (G1FixedEdenSize) {
-      young_list_target_length -= _max_survivor_regions;
-    }
     return young_list_length >= young_list_target_length;
   }
 
   bool can_expand_young_list() {
     size_t young_list_length = _g1->young_list()->length();
     size_t young_list_max_length = _young_list_max_length;
-    if (G1FixedEdenSize) {
-      young_list_max_length -= _max_survivor_regions;
-    }
     return young_list_length < young_list_max_length;
   }
 
+  size_t young_list_max_length() {
+    return _young_list_max_length;
+  }
+
   void update_region_num(bool young);
 
-  bool in_young_gc_mode() {
-    return _in_young_gc_mode;
-  }
-  void set_in_young_gc_mode(bool in_young_gc_mode) {
-    _in_young_gc_mode = in_young_gc_mode;
-  }
-
   bool full_young_gcs() {
     return _full_young_gcs;
   }
@@ -1179,6 +1194,11 @@
   // The limit on the number of regions allocated for survivors.
   size_t _max_survivor_regions;
 
+  // For reporting purposes.
+  size_t _eden_bytes_before_gc;
+  size_t _survivor_bytes_before_gc;
+  size_t _capacity_before_gc;
+
   // The number of survivor regions after a collection.
   size_t _recorded_survivor_regions;
   // List of survivor regions.
@@ -1202,10 +1222,6 @@
     return purpose == GCAllocForSurvived;
   }
 
-  inline GCAllocPurpose alternative_purpose(int purpose) {
-    return GCAllocForTenured;
-  }
-
   static const size_t REGIONS_UNLIMITED = ~(size_t)0;
 
   size_t max_regions(int purpose);
@@ -1242,10 +1258,10 @@
     _survivors_age_table.merge_par(age_table);
   }
 
-  void calculate_max_gc_locker_expansion();
+  void update_max_gc_locker_expansion();
 
   // Calculates survivor space parameters.
-  void calculate_survivors_policy();
+  void update_survivors_policy();
 
 };
 
@@ -1272,8 +1288,6 @@
 
 class G1CollectorPolicy_BestRegionsFirst: public G1CollectorPolicy {
   CollectionSetChooser* _collectionSetChooser;
-  // If the estimated is less then desirable, resize if possible.
-  void expand_if_possible(size_t numRegions);
 
   virtual void choose_collection_set(double target_pause_time_ms);
   virtual void record_collection_pause_start(double start_time_sec,
@@ -1307,8 +1321,4 @@
   return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d;
 }
 
-// Local Variables: ***
-// c-indentation-style: gnu ***
-// End: ***
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
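
A hedged sketch of the two checks that the predict_will_fit() contract described above implies: the predicted pause must fit the target pause time, and the predicted bytes to copy must fit into the regions kept free. All names below are placeholders; the real implementation drives these values from G1's prediction model and survival rates.

static bool sketch_predict_will_fit(double base_time_ms,
                                    double predicted_young_time_ms,
                                    double target_pause_time_ms,
                                    size_t predicted_bytes_to_copy,
                                    size_t base_free_regions,
                                    size_t region_size_bytes) {
  // Check 1: the predicted pause (base cost plus the cost of the
  // candidate young regions) must not exceed the pause time goal.
  if (base_time_ms + predicted_young_time_ms > target_pause_time_ms) {
    return false;
  }
  // Check 2: the objects predicted to be copied must fit into the
  // free space expressed by base_free_regions.
  size_t free_bytes = base_free_regions * region_size_bytes;
  return predicted_bytes_to_copy <= free_bytes;
}
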
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1ErgoVerbose.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1ErgoVerbose.hpp"
+#include "utilities/ostream.hpp"
+
+ErgoLevel G1ErgoVerbose::_level;
+bool G1ErgoVerbose::_enabled[ErgoHeuristicNum];
+
+void G1ErgoVerbose::initialize() {
+  set_level(ErgoLow);
+  set_enabled(false);
+}
+
+void G1ErgoVerbose::set_level(ErgoLevel level) {
+  _level = level;
+}
+
+void G1ErgoVerbose::set_enabled(ErgoHeuristic n, bool enabled) {
+  assert(0 <= n && n < ErgoHeuristicNum, "pre-condition");
+  _enabled[n] = enabled;
+}
+
+void G1ErgoVerbose::set_enabled(bool enabled) {
+  for (int n = 0; n < ErgoHeuristicNum; n += 1) {
+    set_enabled((ErgoHeuristic) n, enabled);
+  }
+}
+
+const char* G1ErgoVerbose::to_string(int tag) {
+  ErgoHeuristic n = extract_heuristic(tag);
+  switch (n) {
+  case ErgoHeapSizing:            return "Heap Sizing";
+  case ErgoCSetConstruction:      return "CSet Construction";
+  case ErgoConcCycles:            return "Concurrent Cycles";
+  case ErgoPartiallyYoungGCs:     return "Partially-Young GCs";
+  default:
+    ShouldNotReachHere();
+    // Keep the Windows compiler happy
+    return NULL;
+  }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ERGOVERBOSE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ERGOVERBOSE_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/debug.hpp"
+
+// The log of G1's heuristic decisions consists of a series of
+// records which share a similar format in order to maintain
+// consistency across records and, ultimately, to make the output
+// easier to parse, if we ever choose to do that. Each record
+// consists of:
+// * A time stamp to be able to easily correlate each record with
+// other events.
+// * A unique string to allow us to easily identify such records.
+// * The name of the heuristic the record corresponds to.
+// * An action string which describes the action that G1 did or is
+// about to do.
+// * An optional reason string which describes the reason for the
+// action.
+// * An optional number of name/value pairs which contributed to the
+// decision to take the action described in the record.
+//
+// Each record is associated with a "tag" which is the combination of
+// the heuristic the record corresponds to, as well as the min level
+// of verboseness at which the record should be printed. The tag is
+// checked against the current settings to determine whether the record
+// should be printed or not.
+
+// The available verboseness levels.
+typedef enum {
+  // Determine which part of the tag is occupied by the level.
+  ErgoLevelShift = 8,
+  ErgoLevelMask = ~((1 << ErgoLevelShift) - 1),
+
+  // ErgoLow is 0 so that we don't have to explicitly OR a heuristic
+  // id with ErgoLow, which keeps its use simpler.
+  ErgoLow = 0,
+  ErgoHigh = 1 << ErgoLevelShift
+} ErgoLevel;
+
+// The available heuristics.
+typedef enum {
+  // Determines which part of the tag is occupied by the heuristic id.
+  ErgoHeuristicMask = ~ErgoLevelMask,
+
+  ErgoHeapSizing = 0,
+  ErgoCSetConstruction,
+  ErgoConcCycles,
+  ErgoPartiallyYoungGCs,
+
+  ErgoHeuristicNum
+} ErgoHeuristic;
+
+class G1ErgoVerbose : AllStatic {
+private:
+  // Determines the minimum verboseness level at which records will be
+  // printed.
+  static ErgoLevel _level;
+  // Determines which heuristics are currently enabled.
+  static bool _enabled[ErgoHeuristicNum];
+
+  static ErgoLevel extract_level(int tag) {
+    return (ErgoLevel) (tag & ErgoLevelMask);
+  }
+
+  static ErgoHeuristic extract_heuristic(int tag) {
+    return (ErgoHeuristic) (tag & ErgoHeuristicMask);
+  }
+
+public:
+  // Needs to be explicitly called at GC initialization.
+  static void initialize();
+
+  static void set_level(ErgoLevel level);
+  static void set_enabled(ErgoHeuristic h, bool enabled);
+  // This variant applies the setting to all heuristics.
+  static void set_enabled(bool enabled);
+
+  static bool enabled(int tag) {
+    ErgoLevel level = extract_level(tag);
+    ErgoHeuristic n = extract_heuristic(tag);
+    return level <= _level && _enabled[n];
+  }
+
+  // Extract the heuristic id from the tag and return a string with
+  // its name.
+  static const char* to_string(int tag);
+};
+
+// The macros below generate the format string for values of different
+// types and/or metrics.
+
+// The reason for the action is optional and is handled specially: the
+// reason string is concatenated here so it's not necessary to pass it
+// as a parameter.
+#define ergo_format_reason(_reason_) ", reason: " _reason_
+
+// Single parameter format strings
+#define ergo_format_str(_name_)      ", " _name_ ": %s"
+#define ergo_format_region(_name_)   ", " _name_ ": "SIZE_FORMAT" regions"
+#define ergo_format_byte(_name_)     ", " _name_ ": "SIZE_FORMAT" bytes"
+#define ergo_format_double(_name_)   ", " _name_ ": %1.2f"
+#define ergo_format_perc(_name_)     ", " _name_ ": %1.2f %%"
+#define ergo_format_ms(_name_)       ", " _name_ ": %1.2f ms"
+
+// Double parameter format strings
+#define ergo_format_byte_perc(_name_)                                   \
+                             ", " _name_ ": "SIZE_FORMAT" bytes (%1.2f %%)"
+
+// Generates the format string
+#define ergo_format(_action_, _extra_format_)                   \
+  " %1.3f: [G1Ergonomics (%s) " _action_ _extra_format_ "]"
+
+// Conditionally, prints an ergonomic decision record. _extra_format_
+// is the format string for the optional items we'd like to print
+// (i.e., the decision's reason and any associated values). This
+// string should be built up using the ergo_*_format macros (see
+// above) to ensure consistency.
+//
+// Since we cannot rely on the compiler supporting variable argument
+// macros, this macro accepts a fixed number of arguments and passes
+// them to the print method. For convenience, we have wrapper macros
+// below which take a specific number of arguments and set the rest to
+// a default value.
+#define ergo_verbose_common(_tag_, _action_, _extra_format_,            \
+                            _arg0_, _arg1_, _arg2_, _arg3_, _arg4_, _arg5_) \
+  do {                                                                  \
+    if (G1ErgoVerbose::enabled((_tag_))) {                              \
+      gclog_or_tty->print_cr(ergo_format(_action_, _extra_format_),     \
+                             os::elapsedTime(),                         \
+                             G1ErgoVerbose::to_string((_tag_)),         \
+                             (_arg0_), (_arg1_), (_arg2_),              \
+                             (_arg3_), (_arg4_), (_arg5_));             \
+    }                                                                   \
+  } while (0)
+
+
+#define ergo_verbose(_tag_, _action_)                           \
+  ergo_verbose_common(_tag_, _action_, "", 0, 0, 0, 0, 0, 0)
+
+#define ergo_verbose0(_tag_, _action_, _extra_format_)                  \
+  ergo_verbose_common(_tag_, _action_, _extra_format_, 0, 0, 0, 0, 0, 0)
+
+#define ergo_verbose1(_tag_, _action_, _extra_format_,                  \
+                      _arg0_)                                           \
+  ergo_verbose_common(_tag_, _action_, _extra_format_,                  \
+                      _arg0_, 0, 0, 0, 0, 0)
+
+#define ergo_verbose2(_tag_, _action_, _extra_format_,                  \
+                      _arg0_, _arg1_)                                   \
+  ergo_verbose_common(_tag_, _action_, _extra_format_,                  \
+                      _arg0_, _arg1_, 0, 0, 0, 0)
+
+#define ergo_verbose3(_tag_, _action_, _extra_format_,                  \
+                      _arg0_, _arg1_, _arg2_)                           \
+  ergo_verbose_common(_tag_, _action_, _extra_format_,                  \
+                      _arg0_, _arg1_, _arg2_, 0, 0, 0)
+
+#define ergo_verbose4(_tag_, _action_, _extra_format_,                  \
+                      _arg0_, _arg1_, _arg2_, _arg3_)                   \
+  ergo_verbose_common(_tag_, _action_, _extra_format_,                  \
+                      _arg0_, _arg1_, _arg2_, _arg3_, 0, 0)
+
+#define ergo_verbose5(_tag_, _action_, _extra_format_,                  \
+                      _arg0_, _arg1_, _arg2_, _arg3_, _arg4_)           \
+  ergo_verbose_common(_tag_, _action_, _extra_format_,                  \
+                      _arg0_, _arg1_, _arg2_, _arg3_, _arg4_, 0)
+
+#define ergo_verbose6(_tag_, _action_, _extra_format_,                  \
+                      _arg0_, _arg1_, _arg2_, _arg3_, _arg4_, _arg5_)   \
+  ergo_verbose_common(_tag_, _action_, _extra_format_,                  \
+                      _arg0_, _arg1_, _arg2_, _arg3_, _arg4_, _arg5_)
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ERGOVERBOSE_HPP
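
A hedged usage sketch of the macros defined above, mirroring a record emitted in g1CollectorPolicy.cpp earlier in this changeset; predicted_old_time_ms is an illustrative value name. The tag ORs a heuristic id with an optional ErgoHigh bit, and enabled() splits it back apart with the two masks.

// Emitted only if CSet-construction records are enabled at level ErgoHigh.
ergo_verbose2(ErgoCSetConstruction | ErgoHigh,
              "add old regions to CSet",
              ergo_format_region("old")
              ergo_format_ms("predicted old region time"),
              old_region_num, predicted_old_time_ms);

// Inside enabled(tag):
//   level     = (ErgoLevel)     (tag & ErgoLevelMask);      // ErgoLow or ErgoHigh
//   heuristic = (ErgoHeuristic) (tag & ErgoHeuristicMask);  // index into _enabled[]
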
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1HRPrinter.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1HRPrinter.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "utilities/ostream.hpp"
+
+const char* G1HRPrinter::action_name(ActionType action) {
+  switch(action) {
+    case Alloc:          return "ALLOC";
+    case AllocForce:     return "ALLOC-FORCE";
+    case Retire:         return "RETIRE";
+    case Reuse:          return "REUSE";
+    case CSet:           return "CSET";
+    case EvacFailure:    return "EVAC-FAILURE";
+    case Cleanup:        return "CLEANUP";
+    case PostCompaction: return "POST-COMPACTION";
+    case Commit:         return "COMMIT";
+    case Uncommit:       return "UNCOMMIT";
+    default:             ShouldNotReachHere();
+  }
+  // trying to keep the Windows compiler happy
+  return NULL;
+}
+
+const char* G1HRPrinter::region_type_name(RegionType type) {
+  switch (type) {
+    case Unset:              return NULL;
+    case Eden:               return "Eden";
+    case Survivor:           return "Survivor";
+    case Old:                return "Old";
+    case SingleHumongous:    return "SingleH";
+    case StartsHumongous:    return "StartsH";
+    case ContinuesHumongous: return "ContinuesH";
+    default:                 ShouldNotReachHere();
+  }
+  // trying to keep the Windows compiler happy
+  return NULL;
+}
+
+const char* G1HRPrinter::phase_name(PhaseType phase) {
+  switch (phase) {
+    case StartGC:     return "StartGC";
+    case EndGC:       return "EndGC";
+    case StartFullGC: return "StartFullGC";
+    case EndFullGC:   return "EndFullGC";
+    default:          ShouldNotReachHere();
+  }
+  // trying to keep the Windows compiler happy
+  return NULL;
+}
+
+#define G1HR_PREFIX     " G1HR"
+
+void G1HRPrinter::print(ActionType action, RegionType type,
+                        HeapRegion* hr, HeapWord* top) {
+  const char* action_str = action_name(action);
+  const char* type_str   = region_type_name(type);
+  HeapWord* bottom = hr->bottom();
+
+  if (type_str != NULL) {
+    if (top != NULL) {
+      gclog_or_tty->print_cr(G1HR_PREFIX" %s(%s) "PTR_FORMAT" "PTR_FORMAT,
+                             action_str, type_str, bottom, top);
+    } else {
+      gclog_or_tty->print_cr(G1HR_PREFIX" %s(%s) "PTR_FORMAT,
+                             action_str, type_str, bottom);
+    }
+  } else {
+    if (top != NULL) {
+      gclog_or_tty->print_cr(G1HR_PREFIX" %s "PTR_FORMAT" "PTR_FORMAT,
+                             action_str, bottom, top);
+    } else {
+      gclog_or_tty->print_cr(G1HR_PREFIX" %s "PTR_FORMAT,
+                             action_str, bottom);
+    }
+  }
+}
+
+void G1HRPrinter::print(ActionType action, HeapWord* bottom, HeapWord* end) {
+  const char* action_str = action_name(action);
+
+  gclog_or_tty->print_cr(G1HR_PREFIX" %s ["PTR_FORMAT","PTR_FORMAT"]",
+                         action_str, bottom, end);
+}
+
+void G1HRPrinter::print(PhaseType phase, size_t phase_num) {
+  const char* phase_str = phase_name(phase);
+  gclog_or_tty->print_cr(G1HR_PREFIX" #%s "SIZE_FORMAT, phase_str, phase_num);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1HRPrinter.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1HRPRINTER_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1HRPRINTER_HPP
+
+#include "memory/allocation.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+
+#define SKIP_RETIRED_FULL_REGIONS 1
+
+class G1HRPrinter VALUE_OBJ_CLASS_SPEC {
+public:
+  typedef enum {
+    Alloc,
+    AllocForce,
+    Retire,
+    Reuse,
+    CSet,
+    EvacFailure,
+    Cleanup,
+    PostCompaction,
+    Commit,
+    Uncommit
+  } ActionType;
+
+  typedef enum {
+    Unset,
+    Eden,
+    Survivor,
+    Old,
+    SingleHumongous,
+    StartsHumongous,
+    ContinuesHumongous
+  } RegionType;
+
+  typedef enum {
+    StartGC,
+    EndGC,
+    StartFullGC,
+    EndFullGC
+  } PhaseType;
+
+private:
+  bool _active;
+
+  static const char* action_name(ActionType action);
+  static const char* region_type_name(RegionType type);
+  static const char* phase_name(PhaseType phase);
+
+  // Print an action event. This version is used in most scenarios and
+  // only prints the region's bottom. The parameters type and top are
+  // optional (the "not set" values are Unset and NULL).
+  static void print(ActionType action, RegionType type,
+                    HeapRegion* hr, HeapWord* top);
+
+  // Print an action event. This version prints both the region's
+  // bottom and end. Used for Commit / Uncommit events.
+  static void print(ActionType action, HeapWord* bottom, HeapWord* end);
+
+  // Print a phase event.
+  static void print(PhaseType phase, size_t phase_num);
+
+public:
+  // In some places we iterate over a list in order to generate output
+  // for the list's elements. By exposing this we can avoid this
+  // iteration if the printer is not active.
+  const bool is_active() { return _active; }
+
+  // Have to set this explicitly as we have to do this during the
+  // heap's initialize() method, not in the constructor.
+  void set_active(bool active) { _active = active; }
+
+  // The methods below are convenient wrappers for the print() methods.
+
+  void alloc(HeapRegion* hr, RegionType type, bool force = false) {
+    if (is_active()) {
+      print((!force) ? Alloc : AllocForce, type, hr, NULL);
+    }
+  }
+
+  void alloc(RegionType type, HeapRegion* hr, HeapWord* top) {
+    if (is_active()) {
+      print(Alloc, type, hr, top);
+    }
+  }
+
+  void retire(HeapRegion* hr) {
+    if (is_active()) {
+      if (!SKIP_RETIRED_FULL_REGIONS || hr->top() < hr->end()) {
+        print(Retire, Unset, hr, hr->top());
+      }
+    }
+  }
+
+  void reuse(HeapRegion* hr) {
+    if (is_active()) {
+      print(Reuse, Unset, hr, NULL);
+    }
+  }
+
+  void cset(HeapRegion* hr) {
+    if (is_active()) {
+      print(CSet, Unset, hr, NULL);
+    }
+  }
+
+  void evac_failure(HeapRegion* hr) {
+    if (is_active()) {
+      print(EvacFailure, Unset, hr, NULL);
+    }
+  }
+
+  void cleanup(HeapRegion* hr) {
+    if (is_active()) {
+      print(Cleanup, Unset, hr, NULL);
+    }
+  }
+
+  void post_compaction(HeapRegion* hr, RegionType type) {
+    if (is_active()) {
+      print(PostCompaction, type, hr, hr->top());
+    }
+  }
+
+  void commit(HeapWord* bottom, HeapWord* end) {
+    if (is_active()) {
+      print(Commit, bottom, end);
+    }
+  }
+
+  void uncommit(HeapWord* bottom, HeapWord* end) {
+    if (is_active()) {
+      print(Uncommit, bottom, end);
+    }
+  }
+
+  void start_gc(bool full, size_t gc_num) {
+    if (is_active()) {
+      if (!full) {
+        print(StartGC, gc_num);
+      } else {
+        print(StartFullGC, gc_num);
+      }
+    }
+  }
+
+  void end_gc(bool full, size_t gc_num) {
+    if (is_active()) {
+      if (!full) {
+        print(EndGC, gc_num);
+      } else {
+        print(EndFullGC, gc_num);
+      }
+    }
+  }
+
+  G1HRPrinter() : _active(false) { }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1HRPRINTER_HPP
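
A hedged sketch of how the wrapper methods above are intended to be called around a GC pause; the surrounding variables (hr, gc_num) and the way the printer gets activated are illustrative assumptions, not part of this changeset.

G1HRPrinter hr_printer;
hr_printer.set_active(true);                   // normally driven by a -XX flag at heap init
hr_printer.start_gc(false /* full */, gc_num); // " G1HR #StartGC <n>"
hr_printer.alloc(hr, G1HRPrinter::Eden);       // " G1HR ALLOC(Eden) <bottom>"
hr_printer.cset(hr);                           // " G1HR CSET <bottom>"
hr_printer.retire(hr);                         // skipped if the region is completely full
hr_printer.end_gc(false /* full */, gc_num);   // " G1HR #EndGC <n>"
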
--- a/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -97,10 +97,6 @@
     // or performance (we are GC'ing most of the time anyway!),
     // simply overwrite the oldest entry in the tracker.
 
-    if (G1PolicyVerbose > 1) {
-      warning("MMU Tracker Queue overflow. Replacing earliest entry.");
-    }
-
     _head_index = trim_index(_head_index + 1);
     assert(_head_index == _tail_index, "Because we have a full circular buffer");
     _tail_index = trim_index(_tail_index + 1);
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -62,6 +62,8 @@
   // hook up weak ref data so it can be used during Mark-Sweep
   assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
   assert(rp != NULL, "should be non-NULL");
+  assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Precondition");
+
   GenMarkSweep::_ref_processor = rp;
   rp->setup_policy(clear_all_softrefs);
 
@@ -84,11 +86,6 @@
 
   mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);
 
-  if (VerifyDuringGC) {
-      G1CollectedHeap* g1h = G1CollectedHeap::heap();
-      g1h->checkConcurrentMark();
-  }
-
   mark_sweep_phase2();
 
   // Don't add any more derived pointers during phase3
@@ -144,6 +141,8 @@
 
   // Process reference objects found during marking
   ReferenceProcessor* rp = GenMarkSweep::ref_processor();
+  assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Sanity");
+
   rp->setup_policy(clear_all_softrefs);
   rp->process_discovered_references(&GenMarkSweep::is_alive,
                                     &GenMarkSweep::keep_alive,
@@ -171,7 +170,6 @@
   GenMarkSweep::follow_mdo_weak_refs();
   assert(GenMarkSweep::_marking_stack.is_empty(), "just drained");
 
-
   // Visit interned string tables and delete unmarked oops
   StringTable::unlink(&GenMarkSweep::is_alive);
   // Clean up unreferenced symbols in symbol table.
@@ -179,6 +177,29 @@
 
   assert(GenMarkSweep::_marking_stack.is_empty(),
          "stack should be empty by now");
+
+  if (VerifyDuringGC) {
+    HandleMark hm;  // handle scope
+    COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
+    gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying ");
+    Universe::heap()->prepare_for_verify();
+    // Note: we can verify only the heap here. When an object is
+    // marked, the previous value of the mark word (including
+    // identity hash values, ages, etc) is preserved, and the mark
+    // word is set to markOop::marked_value - effectively removing
+    // any hash values from the mark word. These hash values are
+    // used when verifying the dictionaries and so removing them
+    // from the mark word can make verification of the dictionaries
+    // fail. At the end of the GC, the original mark word values
+    // (including hash values) are restored to the appropriate
+    // objects.
+    Universe::heap()->verify(/* allow dirty */ true,
+                             /* silent      */ false,
+                             /* option      */ VerifyOption_G1UseMarkWord);
+
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+    gclog_or_tty->print_cr("]");
+  }
 }
 
 class G1PrepareCompactClosure: public HeapRegionClosure {
@@ -328,7 +349,8 @@
                            NULL,  // do not touch code cache here
                            &GenMarkSweep::adjust_pointer_closure);
 
-  g1h->ref_processor()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);
+  assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
+  g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);
 
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
--- a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -27,19 +27,69 @@
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
 
-G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h,
-                                         VirtualSpace* g1_storage_addr) :
+G1GenerationCounters::G1GenerationCounters(G1MonitoringSupport* g1mm,
+                                           const char* name,
+                                           int ordinal, int spaces,
+                                           size_t min_capacity,
+                                           size_t max_capacity,
+                                           size_t curr_capacity)
+  : GenerationCounters(name, ordinal, spaces, min_capacity,
+                       max_capacity, curr_capacity), _g1mm(g1mm) { }
+
+// We pad the capacity three times given that the young generation
+// contains three spaces (eden and two survivors).
+G1YoungGenerationCounters::G1YoungGenerationCounters(G1MonitoringSupport* g1mm,
+                                                     const char* name)
+  : G1GenerationCounters(g1mm, name, 0 /* ordinal */, 3 /* spaces */,
+               G1MonitoringSupport::pad_capacity(0, 3) /* min_capacity */,
+               G1MonitoringSupport::pad_capacity(g1mm->young_gen_max(), 3),
+               G1MonitoringSupport::pad_capacity(0, 3) /* curr_capacity */) {
+  update_all();
+}
+
+G1OldGenerationCounters::G1OldGenerationCounters(G1MonitoringSupport* g1mm,
+                                                 const char* name)
+  : G1GenerationCounters(g1mm, name, 1 /* ordinal */, 1 /* spaces */,
+               G1MonitoringSupport::pad_capacity(0) /* min_capacity */,
+               G1MonitoringSupport::pad_capacity(g1mm->old_gen_max()),
+               G1MonitoringSupport::pad_capacity(0) /* curr_capacity */) {
+  update_all();
+}
+
+void G1YoungGenerationCounters::update_all() {
+  size_t committed =
+            G1MonitoringSupport::pad_capacity(_g1mm->young_gen_committed(), 3);
+  _current_size->set_value(committed);
+}
+
+void G1OldGenerationCounters::update_all() {
+  size_t committed =
+            G1MonitoringSupport::pad_capacity(_g1mm->old_gen_committed());
+  _current_size->set_value(committed);
+}
+
+G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h) :
   _g1h(g1h),
   _incremental_collection_counters(NULL),
   _full_collection_counters(NULL),
-  _non_young_collection_counters(NULL),
+  _old_collection_counters(NULL),
   _old_space_counters(NULL),
   _young_collection_counters(NULL),
   _eden_counters(NULL),
   _from_counters(NULL),
   _to_counters(NULL),
-  _g1_storage_addr(g1_storage_addr)
-{
+
+  _overall_reserved(0),
+  _overall_committed(0),    _overall_used(0),
+  _young_region_num(0),
+  _young_gen_committed(0),
+  _eden_committed(0),       _eden_used(0),
+  _survivor_committed(0),   _survivor_used(0),
+  _old_committed(0),        _old_used(0) {
+
+  _overall_reserved = g1h->max_capacity();
+  recalculate_sizes();
+
   // Counters for GC collections
   //
   //  name "collector.0".  In a generational collector this would be the
@@ -69,110 +119,147 @@
   // generational GC terms.  The "1, 1" parameters are for
   // the n-th generation (=1) with 1 space.
   // Counters are created from minCapacity, maxCapacity, and capacity
-  _non_young_collection_counters =
-    new GenerationCounters("whole heap", 1, 1, _g1_storage_addr);
+  _old_collection_counters = new G1OldGenerationCounters(this, "old");
 
   //  name  "generation.1.space.0"
   // Counters are created from maxCapacity, capacity, initCapacity,
   // and used.
-  _old_space_counters = new HSpaceCounters("space", 0,
-    _g1h->max_capacity(), _g1h->capacity(), _non_young_collection_counters);
+  _old_space_counters = new HSpaceCounters("space", 0 /* ordinal */,
+    pad_capacity(overall_reserved()) /* max_capacity */,
+    pad_capacity(old_space_committed()) /* init_capacity */,
+   _old_collection_counters);
 
   //   Young collection set
   //  name "generation.0".  This is logically the young generation.
   //  The "0, 3" are paremeters for the n-th genertaion (=0) with 3 spaces.
-  // See  _non_young_collection_counters for additional counters
-  _young_collection_counters = new GenerationCounters("young", 0, 3, NULL);
+  // See  _old_collection_counters for additional counters
+  _young_collection_counters = new G1YoungGenerationCounters(this, "young");
 
-  // Replace "max_heap_byte_size() with maximum young gen size for
-  // g1Collectedheap
   //  name "generation.0.space.0"
   // See _old_space_counters for additional counters
-  _eden_counters = new HSpaceCounters("eden", 0,
-    _g1h->max_capacity(), eden_space_committed(),
+  _eden_counters = new HSpaceCounters("eden", 0 /* ordinal */,
+    pad_capacity(overall_reserved()) /* max_capacity */,
+    pad_capacity(eden_space_committed()) /* init_capacity */,
     _young_collection_counters);
 
   //  name "generation.0.space.1"
   // See _old_space_counters for additional counters
   // Set the arguments to indicate that this survivor space is not used.
-  _from_counters = new HSpaceCounters("s0", 1, (long) 0, (long) 0,
+  _from_counters = new HSpaceCounters("s0", 1 /* ordinal */,
+    pad_capacity(0) /* max_capacity */,
+    pad_capacity(0) /* init_capacity */,
     _young_collection_counters);
+  // Given that this survivor space is not used, we update it here
+  // once to reflect that its used space is 0 so that we don't have to
+  // worry about updating it again later.
+  _from_counters->update_used(0);
 
   //  name "generation.0.space.2"
   // See _old_space_counters for additional counters
-  _to_counters = new HSpaceCounters("s1", 2,
-    _g1h->max_capacity(),
-    survivor_space_committed(),
+  _to_counters = new HSpaceCounters("s1", 2 /* ordinal */,
+    pad_capacity(overall_reserved()) /* max_capacity */,
+    pad_capacity(survivor_space_committed()) /* init_capacity */,
     _young_collection_counters);
 }
 
-size_t G1MonitoringSupport::overall_committed() {
-  return g1h()->capacity();
-}
+void G1MonitoringSupport::recalculate_sizes() {
+  G1CollectedHeap* g1 = g1h();
+
+  // Recalculate all the sizes from scratch. We assume that this is
+  // called at a point where no concurrent updates to the various
+  // values we read here are possible (i.e., at a STW phase at the end
+  // of a GC).
 
-size_t G1MonitoringSupport::overall_used() {
-  return g1h()->used_unlocked();
-}
+  size_t young_list_length = g1->young_list()->length();
+  size_t survivor_list_length = g1->g1_policy()->recorded_survivor_regions();
+  assert(young_list_length >= survivor_list_length, "invariant");
+  size_t eden_list_length = young_list_length - survivor_list_length;
+  // Max length includes any potential extensions to the young gen
+  // we'll do when the GC locker is active.
+  size_t young_list_max_length = g1->g1_policy()->young_list_max_length();
+  assert(young_list_max_length >= survivor_list_length, "invariant");
+  size_t eden_list_max_length = young_list_max_length - survivor_list_length;
 
-size_t G1MonitoringSupport::eden_space_committed() {
-  return MAX2(eden_space_used(), (size_t) HeapRegion::GrainBytes);
-}
+  _overall_used = g1->used_unlocked();
+  _eden_used = eden_list_length * HeapRegion::GrainBytes;
+  _survivor_used = survivor_list_length * HeapRegion::GrainBytes;
+  _young_region_num = young_list_length;
+  _old_used = subtract_up_to_zero(_overall_used, _eden_used + _survivor_used);
+
+  // First calculate the committed sizes that can be calculated independently.
+  _survivor_committed = _survivor_used;
+  _old_committed = HeapRegion::align_up_to_region_byte_size(_old_used);
 
-size_t G1MonitoringSupport::eden_space_used() {
-  size_t young_list_length = g1h()->young_list()->length();
-  size_t eden_used = young_list_length * HeapRegion::GrainBytes;
-  size_t survivor_used = survivor_space_used();
-  eden_used = subtract_up_to_zero(eden_used, survivor_used);
-  return eden_used;
-}
+  // Next, start with the overall committed size.
+  _overall_committed = g1->capacity();
+  size_t committed = _overall_committed;
+
+  // Remove the committed size we have calculated so far (for the
+  // survivor and old space).
+  assert(committed >= (_survivor_committed + _old_committed), "sanity");
+  committed -= _survivor_committed + _old_committed;
+
+  // Next, calculate and remove the committed size for the eden.
+  _eden_committed = eden_list_max_length * HeapRegion::GrainBytes;
+  // Somewhat defensive: be robust in case there are inaccuracies in
+  // the calculations
+  _eden_committed = MIN2(_eden_committed, committed);
+  committed -= _eden_committed;
 
-size_t G1MonitoringSupport::survivor_space_committed() {
-  return MAX2(survivor_space_used(),
-              (size_t) HeapRegion::GrainBytes);
-}
+  // Finally, give the rest to the old space...
+  _old_committed += committed;
+  // ..and calculate the young gen committed.
+  _young_gen_committed = _eden_committed + _survivor_committed;
 
-size_t G1MonitoringSupport::survivor_space_used() {
-  size_t survivor_num = g1h()->g1_policy()->recorded_survivor_regions();
-  size_t survivor_used = survivor_num * HeapRegion::GrainBytes;
-  return survivor_used;
+  assert(_overall_committed ==
+         (_eden_committed + _survivor_committed + _old_committed),
+         "the committed sizes should add up");
+  // Somewhat defensive: cap the eden used size to make sure it
+  // never exceeds the committed size.
+  _eden_used = MIN2(_eden_used, _eden_committed);
+  // _survivor_committed and _old_committed are calculated in terms of
+  // the corresponding _*_used value, so the next two conditions
+  // should hold.
+  assert(_survivor_used <= _survivor_committed, "post-condition");
+  assert(_old_used <= _old_committed, "post-condition");
 }
 
-size_t G1MonitoringSupport::old_space_committed() {
-  size_t committed = overall_committed();
-  size_t eden_committed = eden_space_committed();
-  size_t survivor_committed = survivor_space_committed();
-  committed = subtract_up_to_zero(committed, eden_committed);
-  committed = subtract_up_to_zero(committed, survivor_committed);
-  committed = MAX2(committed, (size_t) HeapRegion::GrainBytes);
-  return committed;
-}
+void G1MonitoringSupport::recalculate_eden_size() {
+  G1CollectedHeap* g1 = g1h();
+
+  // When a new eden region is allocated, only the eden_used size is
+  // affected (since we have recalculated everything else at the last GC).
 
-// See the comment near the top of g1MonitoringSupport.hpp for
-// an explanation of these calculations for "used" and "capacity".
-size_t G1MonitoringSupport::old_space_used() {
-  size_t used = overall_used();
-  size_t eden_used = eden_space_used();
-  size_t survivor_used = survivor_space_used();
-  used = subtract_up_to_zero(used, eden_used);
-  used = subtract_up_to_zero(used, survivor_used);
-  return used;
-}
-
-void G1MonitoringSupport::update_counters() {
-  if (UsePerfData) {
-    eden_counters()->update_capacity(eden_space_committed());
-    eden_counters()->update_used(eden_space_used());
-    to_counters()->update_capacity(survivor_space_committed());
-    to_counters()->update_used(survivor_space_used());
-    old_space_counters()->update_capacity(old_space_committed());
-    old_space_counters()->update_used(old_space_used());
-    non_young_collection_counters()->update_all();
+  size_t young_region_num = g1h()->young_list()->length();
+  if (young_region_num > _young_region_num) {
+    size_t diff = young_region_num - _young_region_num;
+    _eden_used += diff * HeapRegion::GrainBytes;
+    // Somewhat defensive: cap the eden used size to make sure it
+    // never exceeds the committed size.
+    _eden_used = MIN2(_eden_used, _eden_committed);
+    _young_region_num = young_region_num;
   }
 }
 
-void G1MonitoringSupport::update_eden_counters() {
+void G1MonitoringSupport::update_sizes() {
+  recalculate_sizes();
   if (UsePerfData) {
-    eden_counters()->update_capacity(eden_space_committed());
+    eden_counters()->update_capacity(pad_capacity(eden_space_committed()));
+    eden_counters()->update_used(eden_space_used());
+    // only the to survivor space (s1) is active, so we don't need to
+    // update the counters for the from survivor space (s0)
+    to_counters()->update_capacity(pad_capacity(survivor_space_committed()));
+    to_counters()->update_used(survivor_space_used());
+    old_space_counters()->update_capacity(pad_capacity(old_space_committed()));
+    old_space_counters()->update_used(old_space_used());
+    old_collection_counters()->update_all();
+    young_collection_counters()->update_all();
+  }
+}
+
+void G1MonitoringSupport::update_eden_size() {
+  recalculate_eden_size();
+  if (UsePerfData) {
     eden_counters()->update_used(eden_space_used());
   }
 }
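
A hedged worked example of the partition that recalculate_sizes() above performs, using made-up numbers and counting in regions rather than bytes for brevity:

//   overall_committed                        = 100 regions (heap capacity)
//   survivor_committed = survivor_used       =   5 regions
//   old_committed      = align_up(old_used)  =  60 regions
//   remaining          = 100 - 5 - 60        =  35 regions
//   eden_committed     = min(eden_max, 35)   =  30 regions (assuming eden_max = 30)
//   old_committed     += 35 - 30             ->  65 regions (leftover goes to old)
//   young_gen_committed = eden + survivor    =  35 regions
//   and the three committed sizes again add up to overall_committed (5 + 30 + 65 = 100).
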
--- a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -28,101 +28,95 @@
 #include "gc_implementation/shared/hSpaceCounters.hpp"
 
 class G1CollectedHeap;
-class G1SpaceMonitoringSupport;
 
-// Class for monitoring logical spaces in G1.
-// G1 defines a set of regions as a young
-// collection (analogous to a young generation).
-// The young collection is a logical generation
-// with no fixed chunk (see space.hpp) reflecting
-// the address space for the generation.  In addition
-// to the young collection there is its complement
-// the non-young collection that is simply the regions
-// not in the young collection.  The non-young collection
-// is treated here as a logical old generation only
-// because the monitoring tools expect a generational
-// heap.  The monitoring tools expect that a Space
-// (see space.hpp) exists that describe the
-// address space of young collection and non-young
-// collection and such a view is provided here.
+// Class for monitoring logical spaces in G1. It provides data for
+// both G1's jstat counters as well as G1's memory pools.
+//
+// G1 splits the heap into heap regions and each heap region belongs
+// to one of the following categories:
+//
+// * eden      : regions that have been allocated since the last GC
+// * survivors : regions with objects that survived the last few GCs
+// * old       : long-lived non-humongous regions
+// * humongous : humongous regions
+// * free      : free regions
+//
+// The combination of eden and survivor regions form the equivalent of
+// the young generation in the other GCs. The combination of old and
+// humongous regions form the equivalent of the old generation in the
+// other GCs. Free regions do not have a good equivalent in the other
+// GCs given that they can be allocated as any of the other region types.
 //
-// This class provides interfaces to access
-// the value of variables for the young collection
-// that include the "capacity" and "used" of the
-// young collection along with constant values
-// for the minimum and maximum capacities for
-// the logical spaces.  Similarly for the non-young
-// collection.
-//
-// Also provided are counters for G1 concurrent collections
-// and stop-the-world full heap collecitons.
+// The monitoring tools expect the heap to contain a number of
+// generations (young, old, perm) and each generation to contain a
+// number of spaces (young: eden, survivors, old). Given that G1 does
+// not maintain those spaces physically (e.g., the set of
+// non-contiguous eden regions can be considered as a "logical"
+// space), we'll provide the illusion that those generations and
+// spaces exist. In reality, each generation and space refers to a set
+// of heap regions that are potentially non-contiguous.
 //
-// Below is a description of how "used" and "capactiy"
-// (or committed) is calculated for the logical spaces.
+// This class provides interfaces to access the min, current, and max
+// capacity and current occupancy for each of G1's logical spaces and
+// generations we expose to the monitoring tools. Also provided are
+// counters for G1 concurrent collections and stop-the-world full heap
+// collections.
 //
-// 1) The used space calculation for a pool is not necessarily
-// independent of the others. We can easily get from G1 the overall
-// used space in the entire heap, the number of regions in the young
-// generation (includes both eden and survivors), and the number of
-// survivor regions. So, from that we calculate:
+// Below is a description of how the various sizes are calculated.
 //
-//  survivor_used = survivor_num * region_size
-//  eden_used     = young_region_num * region_size - survivor_used
-//  old_gen_used  = overall_used - eden_used - survivor_used
+// * Current Capacity
 //
-// Note that survivor_used and eden_used are upper bounds. To get the
-// actual value we would have to iterate over the regions and add up
-// ->used(). But that'd be expensive. So, we'll accept some lack of
-// accuracy for those two. But, we have to be careful when calculating
-// old_gen_used, in case we subtract from overall_used more then the
-// actual number and our result goes negative.
+//    - heap_capacity = current heap capacity (e.g., current committed size)
+//    - young_gen_capacity = current max young gen target capacity
+//          (i.e., young gen target capacity + max allowed expansion capacity)
+//    - survivor_capacity = current survivor region capacity
+//    - eden_capacity = young_gen_capacity - survivor_capacity
+//    - old_capacity = heap_capacity - young_gen_capacity
+//
+//    What we do in the above is to distribute the free regions between
+//    eden_capacity and old_capacity.
 //
-// 2) Calculating the used space is straightforward, as described
-// above. However, how do we calculate the committed space, given that
-// we allocate space for the eden, survivor, and old gen out of the
-// same pool of regions? One way to do this is to use the used value
-// as also the committed value for the eden and survivor spaces and
-// then calculate the old gen committed space as follows:
+// * Occupancy
 //
-//  old_gen_committed = overall_committed - eden_committed - survivor_committed
+//    - young_gen_used = current young region capacity
+//    - survivor_used = survivor_capacity
+//    - eden_used = young_gen_used - survivor_used
+//    - old_used = overall_used - young_gen_used
 //
-// Maybe a better way to do that would be to calculate used for eden
-// and survivor as a sum of ->used() over their regions and then
-// calculate committed as region_num * region_size (i.e., what we use
-// to calculate the used space now). This is something to consider
-// in the future.
+//    Unfortunately, we currently only keep track of the number of
+//    allocated young and survivor regions plus the overall used
+//    bytes in the heap, so the above can be a little inaccurate.
+//
+// * Min Capacity
 //
-// 3) Another decision that is again not straightforward is what is
-// the max size that each memory pool can grow to. One way to do this
-// would be to use the committed size for the max for the eden and
-// survivors and calculate the old gen max as follows (basically, it's
-// a similar pattern to what we use for the committed space, as
-// described above):
+//    We set this to 0 for all spaces. We could consider setting the old
+//    min capacity to the min capacity of the heap (see 7078465).
+//
+// * Max Capacity
 //
-//  old_gen_max = overall_max - eden_max - survivor_max
+//    For jstat, we set the max capacity of all spaces to heap_capacity,
+//    given that we don't always have a reasonable upper bound on how big
+//    each space can grow. For the memory pools, we actually make the max
+//    capacity undefined. We could consider setting the old max capacity
+//    to the max capacity of the heap (see 7078465).
 //
-// Unfortunately, the above makes the max of each pool fluctuate over
-// time and, even though this is allowed according to the spec, it
-// broke several assumptions in the M&M framework (there were cases
-// where used would reach a value greater than max). So, for max we
-// use -1, which means "undefined" according to the spec.
+// If we had more accurate occupancy / capacity information per
+// region set, the above calculations would be greatly simplified
+// and more accurate.
 //
-// 4) Now, there is a very subtle issue with all the above. The
-// framework will call get_memory_usage() on the three pools
-// asynchronously. As a result, each call might get a different value
-// for, say, survivor_num which will yield inconsistent values for
-// eden_used, survivor_used, and old_gen_used (as survivor_num is used
-// in the calculation of all three). This would normally be
-// ok. However, it's possible that this might cause the sum of
-// eden_used, survivor_used, and old_gen_used to go over the max heap
-// size and this seems to sometimes cause JConsole (and maybe other
-// clients) to get confused. There's not a really an easy / clean
-// solution to this problem, due to the asynchrounous nature of the
-// framework.
+// We update all the above synchronously and we store the results in
+// fields so that we just read said fields when needed. A subtle point
+// is that all the above sizes need to be recalculated when the old
+// gen changes capacity (after a GC or after a humongous allocation)
+// but only the eden occupancy changes when a new eden region is
+// allocated. So, in the latter case we have minimal recalculation to
+// do, which is important as we want to keep the eden region allocation
+// path as low-overhead as possible.
 
 class G1MonitoringSupport : public CHeapObj {
+  friend class VMStructs;
+
   G1CollectedHeap* _g1h;
-  VirtualSpace* _g1_storage_addr;
 
   // jstat performance counters
   //  incremental collections both fully and partially young
@@ -133,9 +127,9 @@
   // _from_counters, and _to_counters are associated with
   // this "generational" counter.
   GenerationCounters*  _young_collection_counters;
-  //  non-young collection set counters. The _old_space_counters
+  //  old collection set counters. The _old_space_counters
   // below are associated with this "generational" counter.
-  GenerationCounters*  _non_young_collection_counters;
+  GenerationCounters*  _old_collection_counters;
   // Counters for the capacity and used for
   //   the whole heap
   HSpaceCounters*      _old_space_counters;
@@ -145,6 +139,27 @@
   HSpaceCounters*      _from_counters;
   HSpaceCounters*      _to_counters;
 
+  // When it's appropriate to recalculate the various sizes (at the
+  // end of a GC, when a new eden region is allocated, etc.) we store
+  // them here so that we can easily report them when needed and not
+  // have to recalculate them every time.
+
+  size_t _overall_reserved;
+  size_t _overall_committed;
+  size_t _overall_used;
+
+  size_t _young_region_num;
+  size_t _young_gen_committed;
+  size_t _eden_committed;
+  size_t _eden_used;
+  size_t _survivor_committed;
+  size_t _survivor_used;
+
+  size_t _old_committed;
+  size_t _old_used;
+
+  G1CollectedHeap* g1h() { return _g1h; }
+
   // It returns x - y if x > y, 0 otherwise.
   // As described in the comment above, some of the inputs to the
   // calculations we have to do are obtained concurrently and hence
@@ -160,15 +175,35 @@
     }
   }
 
+  // Recalculate all the sizes.
+  void recalculate_sizes();
+  // Recalculate only what's necessary when a new eden region is allocated.
+  void recalculate_eden_size();
+
  public:
-  G1MonitoringSupport(G1CollectedHeap* g1h, VirtualSpace* g1_storage_addr);
+  G1MonitoringSupport(G1CollectedHeap* g1h);
 
-  G1CollectedHeap* g1h() { return _g1h; }
-  VirtualSpace* g1_storage_addr() { return _g1_storage_addr; }
+  // Unfortunately, the jstat tool assumes that no space has 0
+  // capacity. In our case, given that each space is logical, it's
+  // possible that no regions will be allocated to it, hence for it to
+  // have 0 capacity (e.g., if there are no survivor regions, the survivor
+  // space has 0 capacity). The way we deal with this is to always pad
+  // each capacity value we report to jstat by a very small amount to
+  // make sure that it's never zero. Given that we sometimes have to
+  // report a capacity of a generation that contains several spaces
+  // (e.g., young gen includes one eden, two survivor spaces), the
+  // mult parameter is provided in order to add the appropriate
+  // padding multiple times so that the capacities add up correctly.
+  static size_t pad_capacity(size_t size_bytes, size_t mult = 1) {
+    return size_bytes + MinObjAlignmentInBytes * mult;
+  }
 
-  // Performance Counter accessors
-  void update_counters();
-  void update_eden_counters();
+  // Recalculate all the sizes from scratch and update all the jstat
+  // counters accordingly.
+  void update_sizes();
+  // Recalculate only what's necessary when a new eden region is
+  // allocated and update any jstat counters that need to be updated.
+  void update_eden_size();
 
   CollectorCounters* incremental_collection_counters() {
     return _incremental_collection_counters;
@@ -176,8 +211,11 @@
   CollectorCounters* full_collection_counters() {
     return _full_collection_counters;
   }
-  GenerationCounters* non_young_collection_counters() {
-    return _non_young_collection_counters;
+  GenerationCounters* young_collection_counters() {
+    return _young_collection_counters;
+  }
+  GenerationCounters* old_collection_counters() {
+    return _old_collection_counters;
   }
   HSpaceCounters*      old_space_counters() { return _old_space_counters; }
   HSpaceCounters*      eden_counters() { return _eden_counters; }
@@ -187,17 +225,45 @@
   // Monitoring support used by
   //   MemoryService
   //   jstat counters
-  size_t overall_committed();
-  size_t overall_used();
+
+  size_t overall_reserved()           { return _overall_reserved;     }
+  size_t overall_committed()          { return _overall_committed;    }
+  size_t overall_used()               { return _overall_used;         }
 
-  size_t eden_space_committed();
-  size_t eden_space_used();
+  size_t young_gen_committed()        { return _young_gen_committed;  }
+  size_t young_gen_max()              { return overall_reserved();    }
+  size_t eden_space_committed()       { return _eden_committed;       }
+  size_t eden_space_used()            { return _eden_used;            }
+  size_t survivor_space_committed()   { return _survivor_committed;   }
+  size_t survivor_space_used()        { return _survivor_used;        }
+
+  size_t old_gen_committed()          { return old_space_committed(); }
+  size_t old_gen_max()                { return overall_reserved();    }
+  size_t old_space_committed()        { return _old_committed;        }
+  size_t old_space_used()             { return _old_used;             }
+};
 
-  size_t survivor_space_committed();
-  size_t survivor_space_used();
+class G1GenerationCounters: public GenerationCounters {
+protected:
+  G1MonitoringSupport* _g1mm;
+
+public:
+  G1GenerationCounters(G1MonitoringSupport* g1mm,
+                       const char* name, int ordinal, int spaces,
+                       size_t min_capacity, size_t max_capacity,
+                       size_t curr_capacity);
+};
 
-  size_t old_space_committed();
-  size_t old_space_used();
+class G1YoungGenerationCounters: public G1GenerationCounters {
+public:
+  G1YoungGenerationCounters(G1MonitoringSupport* g1mm, const char* name);
+  virtual void update_all();
+};
+
+class G1OldGenerationCounters: public G1GenerationCounters {
+public:
+  G1OldGenerationCounters(G1MonitoringSupport* g1mm, const char* name);
+  virtual void update_all();
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1MONITORINGSUPPORT_HPP
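
The capacity and occupancy split described in the header comment above can be
written out as a short stand-alone sketch. The names below are illustrative only
(the real class caches the results in its _eden_committed, _old_used, etc.
fields), but the arithmetic and the clamping via subtract_up_to_zero() follow
the comment:

    #include <cstddef>

    struct SizesIn {
      size_t heap_committed;      // current committed heap size
      size_t young_gen_target;    // current max young gen target capacity
      size_t survivor_committed;  // current survivor region capacity
      size_t young_gen_used;      // current young region capacity (upper bound)
      size_t overall_used;        // overall used bytes in the heap
    };

    struct SizesOut {
      size_t eden_committed, old_committed;
      size_t eden_used, survivor_used, old_used;
    };

    // Inputs are sampled concurrently, so clamp instead of going negative.
    inline size_t subtract_up_to_zero(size_t x, size_t y) {
      return x > y ? x - y : 0;
    }

    inline SizesOut distribute_sizes(const SizesIn& in) {
      SizesOut out;
      // Capacity: free regions are distributed between eden and old.
      out.eden_committed = subtract_up_to_zero(in.young_gen_target, in.survivor_committed);
      out.old_committed  = subtract_up_to_zero(in.heap_committed, in.young_gen_target);
      // Occupancy: survivors are assumed full; eden and old get the remainder.
      out.survivor_used  = in.survivor_committed;
      out.eden_used      = subtract_up_to_zero(in.young_gen_used, out.survivor_used);
      out.old_used       = subtract_up_to_zero(in.overall_used, in.young_gen_used);
      return out;
    }

When such a capacity is reported to jstat it is passed through pad_capacity();
for a generation that contains several spaces the mult argument adds the padding
once per space (for example, a young generation made of one eden and two
survivor spaces would use mult == 3), so the per-space paddings add up correctly.
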
--- a/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,8 @@
 class CMBitMap;
 class CMMarkStack;
 class G1ParScanThreadState;
+class CMTask;
+class ReferenceProcessor;
 
 // A class that scans oops in a given heap region (much as OopsInGenClosure
 // scans oops in a generation.)
@@ -40,7 +42,7 @@
 protected:
   HeapRegion* _from;
 public:
-  virtual void set_region(HeapRegion* from) { _from = from; }
+  void set_region(HeapRegion* from) { _from = from; }
 };
 
 class G1ParClosureSuper : public OopsInHeapRegionClosure {
@@ -49,6 +51,8 @@
   G1RemSet* _g1_rem;
   ConcurrentMark* _cm;
   G1ParScanThreadState* _par_scan_state;
+  bool _during_initial_mark;
+  bool _mark_in_progress;
 public:
   G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
   bool apply_to_weak_ref_discovered_field() { return true; }
@@ -56,8 +60,10 @@
 
 class G1ParPushHeapRSClosure : public G1ParClosureSuper {
 public:
-  G1ParPushHeapRSClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
+  G1ParPushHeapRSClosure(G1CollectedHeap* g1,
+                         G1ParScanThreadState* par_scan_state):
     G1ParClosureSuper(g1, par_scan_state) { }
+
   template <class T> void do_oop_nv(T* p);
   virtual void do_oop(oop* p)          { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p)    { do_oop_nv(p); }
@@ -65,8 +71,13 @@
 
 class G1ParScanClosure : public G1ParClosureSuper {
 public:
-  G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
-    G1ParClosureSuper(g1, par_scan_state) { }
+  G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
+    G1ParClosureSuper(g1, par_scan_state)
+  {
+    assert(_ref_processor == NULL, "sanity");
+    _ref_processor = rp;
+  }
+
   template <class T> void do_oop_nv(T* p);
   virtual void do_oop(oop* p)          { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p)    { do_oop_nv(p); }
@@ -89,9 +100,18 @@
 
 class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
   G1ParScanClosure _scanner;
+
 public:
-  G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
-    G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state) { }
+  G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
+    G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state, rp)
+  {
+    assert(_ref_processor == NULL, "sanity");
+  }
+
+  G1ParScanClosure* scanner() {
+    return &_scanner;
+  }
+
   template <class T> void do_oop_nv(T* p);
   virtual void do_oop(oop* p)       { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
@@ -101,8 +121,9 @@
 class G1ParCopyHelper : public G1ParClosureSuper {
   G1ParScanClosure *_scanner;
 protected:
-  template <class T> void mark_forwardee(T* p);
-  oop copy_to_survivor_space(oop obj);
+  template <class T> void mark_object(T* p);
+  oop copy_to_survivor_space(oop obj, bool should_mark_root,
+                                      bool should_mark_copy);
 public:
   G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
                   G1ParScanClosure *scanner) :
@@ -110,17 +131,25 @@
 };
 
 template<bool do_gen_barrier, G1Barrier barrier,
-         bool do_mark_forwardee>
+         bool do_mark_object>
 class G1ParCopyClosure : public G1ParCopyHelper {
   G1ParScanClosure _scanner;
+
   template <class T> void do_oop_work(T* p);
+
 public:
-  G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
-    _scanner(g1, par_scan_state), G1ParCopyHelper(g1, par_scan_state, &_scanner) { }
+  G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
+                   ReferenceProcessor* rp) :
+    _scanner(g1, par_scan_state, rp),
+    G1ParCopyHelper(g1, par_scan_state, &_scanner)
+  {
+    assert(_ref_processor == NULL, "sanity");
+  }
+
+  G1ParScanClosure* scanner() { return &_scanner; }
+
   template <class T> void do_oop_nv(T* p) {
     do_oop_work(p);
-    if (do_mark_forwardee)
-      mark_forwardee(p);
   }
   virtual void do_oop(oop* p)       { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
@@ -128,21 +157,25 @@
 
 typedef G1ParCopyClosure<false, G1BarrierNone, false> G1ParScanExtRootClosure;
 typedef G1ParCopyClosure<true,  G1BarrierNone, false> G1ParScanPermClosure;
-typedef G1ParCopyClosure<false, G1BarrierRS,   false> G1ParScanHeapRSClosure;
+
 typedef G1ParCopyClosure<false, G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
 typedef G1ParCopyClosure<true,  G1BarrierNone, true> G1ParScanAndMarkPermClosure;
-typedef G1ParCopyClosure<false, G1BarrierRS,   true> G1ParScanAndMarkHeapRSClosure;
+
+// The following closure types are no longer used but are retained
+// for historical reasons:
+// typedef G1ParCopyClosure<false, G1BarrierRS,   false> G1ParScanHeapRSClosure;
+// typedef G1ParCopyClosure<false, G1BarrierRS,   true> G1ParScanAndMarkHeapRSClosure;
 
-// This is the only case when we set skip_cset_test. Basically, this
-// closure is (should?) only be called directly while we're draining
-// the overflow and task queues. In that case we know that the
-// reference in question points into the collection set, otherwise we
-// would not have pushed it on the queue. The following is defined in
-// g1_specialized_oop_closures.hpp.
-// typedef G1ParCopyClosure<false, G1BarrierEvac, false, true> G1ParScanHeapEvacClosure;
-// We need a separate closure to handle references during evacuation
-// failure processing, as we cannot asume that the reference already
-// points into the collection set (like G1ParScanHeapEvacClosure does).
+// The following closure type is defined in g1_specialized_oop_closures.hpp:
+//
+// typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;
+
+// We use a separate closure to handle references during evacuation
+// failure processing.
+// We could have used another instance of G1ParScanHeapEvacClosure
+// (since that closure no longer assumes that the references it
+// handles point into the collection set).
+
 typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;
 
 class FilterIntoCSClosure: public OopClosure {
@@ -151,9 +184,10 @@
   DirtyCardToOopClosure* _dcto_cl;
 public:
   FilterIntoCSClosure(  DirtyCardToOopClosure* dcto_cl,
-                        G1CollectedHeap* g1, OopClosure* oc) :
-    _dcto_cl(dcto_cl), _g1(g1), _oc(oc)
-  {}
+                        G1CollectedHeap* g1,
+                        OopClosure* oc) :
+    _dcto_cl(dcto_cl), _g1(g1), _oc(oc) { }
+
   template <class T> void do_oop_nv(T* p);
   virtual void do_oop(oop* p)        { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
@@ -161,44 +195,6 @@
   bool do_header() { return false; }
 };
 
-class FilterInHeapRegionAndIntoCSClosure : public OopsInHeapRegionClosure {
-  G1CollectedHeap* _g1;
-  OopsInHeapRegionClosure* _oc;
-public:
-  FilterInHeapRegionAndIntoCSClosure(G1CollectedHeap* g1,
-                                     OopsInHeapRegionClosure* oc) :
-    _g1(g1), _oc(oc)
-  {}
-  template <class T> void do_oop_nv(T* p);
-  virtual void do_oop(oop* p) { do_oop_nv(p); }
-  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
-  bool apply_to_weak_ref_discovered_field() { return true; }
-  bool do_header() { return false; }
-  void set_region(HeapRegion* from) {
-    _oc->set_region(from);
-  }
-};
-
-class FilterAndMarkInHeapRegionAndIntoCSClosure : public OopsInHeapRegionClosure {
-  G1CollectedHeap* _g1;
-  ConcurrentMark* _cm;
-  OopsInHeapRegionClosure* _oc;
-public:
-  FilterAndMarkInHeapRegionAndIntoCSClosure(G1CollectedHeap* g1,
-                                            OopsInHeapRegionClosure* oc,
-                                            ConcurrentMark* cm)
-  : _g1(g1), _oc(oc), _cm(cm) { }
-
-  template <class T> void do_oop_nv(T* p);
-  virtual void do_oop(oop* p) { do_oop_nv(p); }
-  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
-  bool apply_to_weak_ref_discovered_field() { return true; }
-  bool do_header() { return false; }
-  void set_region(HeapRegion* from) {
-    _oc->set_region(from);
-  }
-};
-
 class FilterOutOfRegionClosure: public OopClosure {
   HeapWord* _r_bottom;
   HeapWord* _r_end;
@@ -214,4 +210,16 @@
   int out_of_region() { return _out_of_region; }
 };
 
+// Closure for iterating over object fields during concurrent marking
+class G1CMOopClosure : public OopClosure {
+  G1CollectedHeap*   _g1h;
+  ConcurrentMark*    _cm;
+  CMTask*            _task;
+public:
+  G1CMOopClosure(G1CollectedHeap* g1h, ConcurrentMark* cm, CMTask* task);
+  template <class T> void do_oop_nv(T* p);
+  virtual void do_oop(      oop* p) { do_oop_nv(p); }
+  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
+};
+
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
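
All of the closures above follow the same idiom: the real work lives in a
non-virtual, templated do_oop_nv() that handles both oop* and narrowOop*, and
the virtual do_oop() overloads merely forward to it so that specialized
(statically typed) callers can bypass the virtual dispatch. A minimal
stand-alone illustration, with hypothetical types rather than the HotSpot oop
machinery:

    #include <cstdio>

    using oop = void*;              // stand-ins for the real oop types
    using narrowOop = unsigned int;

    class ExampleOopClosure {
    public:
      // The templated worker: one body serves both full and compressed oops.
      template <class T> void do_oop_nv(T* p) {
        std::printf("visiting field at %p\n", static_cast<void*>(p));
      }
      // Virtual entry points simply delegate to the non-virtual worker.
      virtual void do_oop(oop* p)       { do_oop_nv(p); }
      virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
      virtual ~ExampleOopClosure() = default;
    };
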
--- a/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_INLINE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_INLINE_HPP
 
-#include "gc_implementation/g1/concurrentMark.hpp"
+#include "gc_implementation/g1/concurrentMark.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.hpp"
 #include "gc_implementation/g1/g1OopClosures.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
@@ -66,27 +66,6 @@
   }
 }
 
-template <class T> inline void FilterInHeapRegionAndIntoCSClosure::do_oop_nv(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop) &&
-      _g1->obj_in_cs(oopDesc::decode_heap_oop_not_null(heap_oop)))
-    _oc->do_oop(p);
-}
-
-template <class T> inline void FilterAndMarkInHeapRegionAndIntoCSClosure::do_oop_nv(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    HeapRegion* hr = _g1->heap_region_containing((HeapWord*) obj);
-    if (hr != NULL) {
-      if (hr->in_collection_set())
-        _oc->do_oop(p);
-      else if (!hr->is_young())
-        _cm->grayRoot(obj);
-    }
-  }
-}
-
 // This closure is applied to the fields of the objects that have just been copied.
 template <class T> inline void G1ParScanClosure::do_oop_nv(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
@@ -129,5 +108,18 @@
   }
 }
 
+template <class T> inline void G1CMOopClosure::do_oop_nv(T* p) {
+  assert(_g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
+  assert(!_g1h->is_on_master_free_list(
+                    _g1h->heap_region_containing((HeapWord*) p)), "invariant");
+
+  oop obj = oopDesc::load_decode_heap_oop(p);
+  if (_cm->verbose_high()) {
+    gclog_or_tty->print_cr("[%d] we're looking at location "
+                           "*"PTR_FORMAT" = "PTR_FORMAT,
+                           _task->task_id(), p, (void*) obj);
+  }
+  _task->deal_with_reference(obj);
+}
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_INLINE_HPP
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -66,41 +66,6 @@
 }
 #endif
 
-
-class IntoCSOopClosure: public OopsInHeapRegionClosure {
-  OopsInHeapRegionClosure* _blk;
-  G1CollectedHeap* _g1;
-public:
-  IntoCSOopClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) :
-    _g1(g1), _blk(blk) {}
-  void set_region(HeapRegion* from) {
-    _blk->set_region(from);
-  }
-  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-  virtual void do_oop(      oop* p) { do_oop_work(p); }
-  template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
-    if (_g1->obj_in_cs(obj)) _blk->do_oop(p);
-  }
-  bool apply_to_weak_ref_discovered_field() { return true; }
-  bool idempotent() { return true; }
-};
-
-class VerifyRSCleanCardOopClosure: public OopClosure {
-  G1CollectedHeap* _g1;
-public:
-  VerifyRSCleanCardOopClosure(G1CollectedHeap* g1) : _g1(g1) {}
-
-  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-  virtual void do_oop(      oop* p) { do_oop_work(p); }
-  template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
-    HeapRegion* to = _g1->heap_region_containing(obj);
-    guarantee(to == NULL || !to->in_collection_set(),
-              "Missed a rem set member.");
-  }
-};
-
 G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
   : _g1(g1), _conc_refine_cards(0),
     _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
@@ -269,6 +234,7 @@
   HeapRegion *startRegion = calculateStartRegion(worker_i);
 
   ScanRSClosure scanRScl(oc, worker_i);
+
   _g1->collection_set_iterate_from(startRegion, &scanRScl);
   scanRScl.set_try_claimed();
   _g1->collection_set_iterate_from(startRegion, &scanRScl);
@@ -318,6 +284,7 @@
   double start = os::elapsedTime();
   // Apply the given closure to all remaining log entries.
   RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
+
   _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, into_cset_dcq, false, worker_i);
 
   // Now there should be no dirty cards.
@@ -332,31 +299,6 @@
   _g1p->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0);
 }
 
-#ifndef PRODUCT
-class PrintRSClosure : public HeapRegionClosure {
-  int _count;
-public:
-  PrintRSClosure() : _count(0) {}
-  bool doHeapRegion(HeapRegion* r) {
-    HeapRegionRemSet* hrrs = r->rem_set();
-    _count += (int) hrrs->occupied();
-    if (hrrs->occupied() == 0) {
-      gclog_or_tty->print("Heap Region [" PTR_FORMAT ", " PTR_FORMAT ") "
-                          "has no remset entries\n",
-                          r->bottom(), r->end());
-    } else {
-      gclog_or_tty->print("Printing rem set for heap region [" PTR_FORMAT ", " PTR_FORMAT ")\n",
-                          r->bottom(), r->end());
-      r->print();
-      hrrs->print();
-      gclog_or_tty->print("\nDone printing rem set\n");
-    }
-    return false;
-  }
-  int occupied() {return _count;}
-};
-#endif
-
 class CountRSSizeClosure: public HeapRegionClosure {
   size_t _n;
   size_t _tot;
@@ -482,10 +424,6 @@
 }
 
 void G1RemSet::prepare_for_oops_into_collection_set_do() {
-#if G1_REM_SET_LOGGING
-  PrintRSClosure cl;
-  _g1->collection_set_iterate(&cl);
-#endif
   cleanupHRRS();
   ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
   _g1->set_refine_cte_cl_concurrency(false);
@@ -504,14 +442,6 @@
 }
 
 
-class cleanUpIteratorsClosure : public HeapRegionClosure {
-  bool doHeapRegion(HeapRegion *r) {
-    HeapRegionRemSet* hrrs = r->rem_set();
-    hrrs->init_for_par_iteration();
-    return false;
-  }
-};
-
 // This closure, applied to a DirtyCardQueueSet, is used to immediately
 // update the RSets for the regions in the CSet. For each card it iterates
 // through the oops which coincide with that card. It scans the reference
@@ -572,18 +502,13 @@
 void G1RemSet::cleanup_after_oops_into_collection_set_do() {
   guarantee( _cards_scanned != NULL, "invariant" );
   _total_cards_scanned = 0;
-  for (uint i = 0; i < n_workers(); ++i)
+  for (uint i = 0; i < n_workers(); ++i) {
     _total_cards_scanned += _cards_scanned[i];
+  }
   FREE_C_HEAP_ARRAY(size_t, _cards_scanned);
   _cards_scanned = NULL;
   // Cleanup after copy
-#if G1_REM_SET_LOGGING
-  PrintRSClosure cl;
-  _g1->heap_region_iterate(&cl);
-#endif
   _g1->set_refine_cte_cl_concurrency(true);
-  cleanUpIteratorsClosure iterClosure;
-  _g1->collection_set_iterate(&iterClosure);
   // Set all cards back to clean.
   _g1->cleanUpCardTable();
 
--- a/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -142,8 +142,6 @@
   virtual void prepare_for_verify();
 };
 
-#define G1_REM_SET_LOGGING 0
-
 class CountNonCleanMemRegionClosure: public MemRegionClosure {
   G1CollectedHeap* _g1;
   int _n;
--- a/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -65,12 +65,6 @@
 
   HeapRegion* to = _g1->heap_region_containing(obj);
   if (to != NULL && from != to) {
-#if G1_REM_SET_LOGGING
-    gclog_or_tty->print_cr("Adding " PTR_FORMAT " (" PTR_FORMAT ") to RS"
-                           " for region [" PTR_FORMAT ", " PTR_FORMAT ")",
-                           p, obj,
-                           to->bottom(), to->end());
-#endif
     assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
     to->rem_set()->add_reference(p, tid);
   }
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -39,8 +39,6 @@
   develop(intx, G1MarkingOverheadPercent, 0,                                \
           "Overhead of concurrent marking")                                 \
                                                                             \
-  develop(bool, G1Gen, true,                                                \
-          "If true, it will enable the generational G1")                    \
                                                                             \
   develop(intx, G1PolicyVerbose, 0,                                         \
           "The verbosity level on G1 policy decisions")                     \
@@ -126,9 +124,6 @@
   develop(bool, G1RSBarrierNullFilter, true,                                \
           "If true, generate null-pointer filtering code in RS barrier")    \
                                                                             \
-  develop(bool, G1PrintCTFilterStats, false,                                \
-          "If true, print stats on RS filtering effectiveness")             \
-                                                                            \
   develop(bool, G1DeferredRSUpdate, true,                                   \
           "If true, use deferred RS updates")                               \
                                                                             \
@@ -139,9 +134,9 @@
   develop(bool, G1RSCountHisto, false,                                      \
           "If true, print a histogram of RS occupancies after each pause")  \
                                                                             \
-  product(bool, G1PrintRegionLivenessInfo, false,                           \
-          "Prints the liveness information for all regions in the heap "    \
-          "at the end of a marking cycle.")                                 \
+  diagnostic(bool, G1PrintRegionLivenessInfo, false,                        \
+            "Prints the liveness information for all regions in the heap "  \
+            "at the end of a marking cycle.")                               \
                                                                             \
   develop(bool, G1PrintParCleanupStats, false,                              \
           "When true, print extra stats about parallel cleanup.")           \
@@ -233,7 +228,7 @@
           "the number of regions for which we'll print a surv rate "        \
           "summary.")                                                       \
                                                                             \
-  product(intx, G1ReservePercent, 10,                                       \
+  product(uintx, G1ReservePercent, 10,                                      \
           "It determines the minimum reserve we should have in the heap "   \
           "to minimize the probability of promotion failure.")              \
                                                                             \
@@ -251,16 +246,6 @@
           "When set, G1 will fail when it encounters an FP 'error', "       \
           "so as to allow debugging")                                       \
                                                                             \
-  develop(bool, G1FixedTenuringThreshold, false,                            \
-          "When set, G1 will not adjust the tenuring threshold")            \
-                                                                            \
-  develop(bool, G1FixedEdenSize, false,                                     \
-          "When set, G1 will not allocate unused survivor space regions")   \
-                                                                            \
-  develop(uintx, G1FixedSurvivorSpaceSize, 0,                               \
-          "If non-0 is the size of the G1 survivor space, "                 \
-          "otherwise SurvivorRatio is used to determine the size")          \
-                                                                            \
   product(uintx, G1HeapRegionSize, 0,                                       \
           "Size of the G1 regions.")                                        \
                                                                             \
--- a/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,7 +36,7 @@
 };
 
 template<bool do_gen_barrier, G1Barrier barrier,
-         bool do_mark_forwardee>
+         bool do_mark_object>
 class G1ParCopyClosure;
 class G1ParScanClosure;
 class G1ParPushHeapRSClosure;
@@ -45,8 +45,7 @@
 
 class FilterIntoCSClosure;
 class FilterOutOfRegionClosure;
-class FilterInHeapRegionAndIntoCSClosure;
-class FilterAndMarkInHeapRegionAndIntoCSClosure;
+class G1CMOopClosure;
 
 #ifdef FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES
 #error "FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES already defined."
@@ -58,8 +57,7 @@
       f(G1ParPushHeapRSClosure,_nv)                     \
       f(FilterIntoCSClosure,_nv)                        \
       f(FilterOutOfRegionClosure,_nv)                   \
-      f(FilterInHeapRegionAndIntoCSClosure,_nv)         \
-      f(FilterAndMarkInHeapRegionAndIntoCSClosure,_nv)
+      f(G1CMOopClosure,_nv)
 
 #ifdef FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES
 #error "FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES already defined."
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -45,7 +45,7 @@
                                  FilterKind fk) :
   ContiguousSpaceDCTOC(hr, cl, precision, NULL),
   _hr(hr), _fk(fk), _g1(g1)
-{}
+{ }
 
 FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                    OopClosure* oc) :
@@ -60,13 +60,14 @@
   oop _containing_obj;
   bool _failures;
   int _n_failures;
-  bool _use_prev_marking;
+  VerifyOption _vo;
 public:
-  // use_prev_marking == true  -> use "prev" marking information,
-  // use_prev_marking == false -> use "next" marking information
-  VerifyLiveClosure(G1CollectedHeap* g1h, bool use_prev_marking) :
+  // _vo == UsePrevMarking -> use "prev" marking information,
+  // _vo == UseNextMarking -> use "next" marking information,
+  // _vo == UseMarkWord    -> use mark word from object header.
+  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
     _g1h(g1h), _bs(NULL), _containing_obj(NULL),
-    _failures(false), _n_failures(0), _use_prev_marking(use_prev_marking)
+    _failures(false), _n_failures(0), _vo(vo)
   {
     BarrierSet* bs = _g1h->barrier_set();
     if (bs->is_a(BarrierSet::CardTableModRef))
@@ -95,14 +96,14 @@
 
   template <class T> void do_oop_work(T* p) {
     assert(_containing_obj != NULL, "Precondition");
-    assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking),
+    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
            "Precondition");
     T heap_oop = oopDesc::load_heap_oop(p);
     if (!oopDesc::is_null(heap_oop)) {
       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
       bool failed = false;
       if (!_g1h->is_in_closed_subset(obj) ||
-          _g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
+          _g1h->is_obj_dead_cond(obj, _vo)) {
         if (!_failures) {
           gclog_or_tty->print_cr("");
           gclog_or_tty->print_cr("----------");
@@ -159,20 +160,16 @@
               gclog_or_tty->print_cr("----------");
             }
             gclog_or_tty->print_cr("Missing rem set entry:");
-            gclog_or_tty->print_cr("Field "PTR_FORMAT
-                          " of obj "PTR_FORMAT
-                          ", in region %d ["PTR_FORMAT
-                          ", "PTR_FORMAT"),",
-                          p, (void*) _containing_obj,
-                          from->hrs_index(),
-                          from->bottom(),
-                          from->end());
+            gclog_or_tty->print_cr("Field "PTR_FORMAT" "
+                                   "of obj "PTR_FORMAT", "
+                                   "in region "HR_FORMAT,
+                                   p, (void*) _containing_obj,
+                                   HR_FORMAT_PARAMS(from));
             _containing_obj->print_on(gclog_or_tty);
-            gclog_or_tty->print_cr("points to obj "PTR_FORMAT
-                          " in region %d ["PTR_FORMAT
-                          ", "PTR_FORMAT").",
-                          (void*) obj, to->hrs_index(),
-                          to->bottom(), to->end());
+            gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
+                                   "in region "HR_FORMAT,
+                                   (void*) obj,
+                                   HR_FORMAT_PARAMS(to));
             obj->print_on(gclog_or_tty);
             gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                           cv_obj, cv_field);
@@ -213,15 +210,17 @@
                                               HeapWord* top,
                                               OopClosure* cl) {
   G1CollectedHeap* g1h = _g1;
+  int oop_size;
+  OopClosure* cl2 = NULL;
 
-  int oop_size;
-
-  OopClosure* cl2 = cl;
   FilterIntoCSClosure intoCSFilt(this, g1h, cl);
   FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);
+
   switch (_fk) {
+  case NoFilterKind:          cl2 = cl; break;
   case IntoCSFilterKind:      cl2 = &intoCSFilt; break;
   case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
+  default:                    ShouldNotReachHere();
   }
 
   // Start filtering what we add to the remembered set. If the object is
@@ -242,16 +241,19 @@
     case NoFilterKind:
       bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
       break;
+
     case IntoCSFilterKind: {
       FilterIntoCSClosure filt(this, g1h, cl);
       bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
       break;
     }
+
     case OutOfRegionFilterKind: {
       FilterOutOfRegionClosure filt(_hr, cl);
       bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
       break;
     }
+
     default:
       ShouldNotReachHere();
     }
@@ -355,7 +357,6 @@
          "we should have already filtered out humongous regions");
 
   _in_collection_set = false;
-  _is_gc_alloc_region = false;
 
   set_young_index_in_cset(-1);
   uninstall_surv_rate_group();
@@ -484,16 +485,16 @@
 
 
 HeapRegion::
-HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
-                     MemRegion mr, bool is_zeroed)
+HeapRegion(size_t hrs_index, G1BlockOffsetSharedArray* sharedOffsetArray,
+           MemRegion mr, bool is_zeroed)
   : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
-    _next_fk(HeapRegionDCTOC::NoFilterKind),
-    _hrs_index(-1),
+    _hrs_index(hrs_index),
     _humongous_type(NotHumongous), _humongous_start_region(NULL),
-    _in_collection_set(false), _is_gc_alloc_region(false),
+    _in_collection_set(false),
     _next_in_special_set(NULL), _orig_end(NULL),
     _claimed(InitialClaimValue), _evacuation_failed(false),
     _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
+    _gc_efficiency(0.0),
     _young_type(NotYoung), _next_young_region(NULL),
     _next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
 #ifdef ASSERT
@@ -720,8 +721,6 @@
   }
   if (in_collection_set())
     st->print(" CS");
-  else if (is_gc_alloc_region())
-    st->print(" A ");
   else
     st->print("   ");
   if (is_young())
@@ -740,20 +739,20 @@
 
 void HeapRegion::verify(bool allow_dirty) const {
   bool dummy = false;
-  verify(allow_dirty, /* use_prev_marking */ true, /* failures */ &dummy);
+  verify(allow_dirty, VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
 }
 
 // This really ought to be commoned up into OffsetTableContigSpace somehow.
 // We would need a mechanism to make that code skip dead objects.
 
 void HeapRegion::verify(bool allow_dirty,
-                        bool use_prev_marking,
+                        VerifyOption vo,
                         bool* failures) const {
   G1CollectedHeap* g1 = G1CollectedHeap::heap();
   *failures = false;
   HeapWord* p = bottom();
   HeapWord* prev_p = NULL;
-  VerifyLiveClosure vl_cl(g1, use_prev_marking);
+  VerifyLiveClosure vl_cl(g1, vo);
   bool is_humongous = isHumongous();
   bool do_bot_verify = !is_young();
   size_t object_num = 0;
@@ -778,7 +777,7 @@
       return;
     }
 
-    if (!g1->is_obj_dead_cond(obj, this, use_prev_marking)) {
+    if (!g1->is_obj_dead_cond(obj, this, vo)) {
       if (obj->is_oop()) {
         klassOop klass = obj->klass();
         if (!klass->is_perm()) {
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -52,9 +52,11 @@
 class HeapRegion;
 class HeapRegionSetBase;
 
-#define HR_FORMAT "%d:["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
-#define HR_FORMAT_PARAMS(_hr_) (_hr_)->hrs_index(), (_hr_)->bottom(), \
-                               (_hr_)->top(), (_hr_)->end()
+#define HR_FORMAT SIZE_FORMAT":(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
+#define HR_FORMAT_PARAMS(_hr_) \
+                (_hr_)->hrs_index(), \
+                (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : "-", \
+                (_hr_)->bottom(), (_hr_)->top(), (_hr_)->end()
 
 // A dirty card to oop closure for heap regions. It
 // knows how to get the G1 heap and how to use the bitmap
@@ -116,7 +118,6 @@
                   FilterKind fk);
 };
 
-
 // The complicating factor is that BlockOffsetTable diverged
 // significantly, and we need functionality that is only in the G1 version.
 // So I copied that code, which led to an alternate G1 version of
@@ -221,10 +222,6 @@
     ContinuesHumongous
   };
 
-  // The next filter kind that should be used for a "new_dcto_cl" call with
-  // the "traditional" signature.
-  HeapRegionDCTOC::FilterKind _next_fk;
-
   // Requires that the region "mr" be dense with objects, and begin and end
   // with an object.
   void oops_in_mr_iterate(MemRegion mr, OopClosure* cl);
@@ -237,9 +234,8 @@
   G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }
 
  protected:
-  // If this region is a member of a HeapRegionSeq, the index in that
-  // sequence, otherwise -1.
-  int  _hrs_index;
+  // The index of this region in the heap region sequence.
+  size_t  _hrs_index;
 
   HumongousType _humongous_type;
   // For a humongous region, region in which it starts.
@@ -250,10 +246,6 @@
   // True iff the region is in current collection_set.
   bool _in_collection_set;
 
-  // Is this or has it been an allocation region in the current collection
-  // pause.
-  bool _is_gc_alloc_region;
-
   // True iff an attempt to evacuate an object in the region failed.
   bool _evacuation_failed;
 
@@ -296,8 +288,7 @@
   enum YoungType {
     NotYoung,                   // a region is not young
     Young,                      // a region is young
-    Survivor                    // a region is young and it contains
-                                // survivor
+    Survivor                    // a region is young and it contains survivors
   };
 
   volatile YoungType _young_type;
@@ -351,7 +342,8 @@
 
  public:
   // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
-  HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
+  HeapRegion(size_t hrs_index,
+             G1BlockOffsetSharedArray* sharedOffsetArray,
              MemRegion mr, bool is_zeroed);
 
   static int LogOfHRGrainBytes;
@@ -365,6 +357,11 @@
   static int GrainWords;
   static int CardsPerRegion;
 
+  static size_t align_up_to_region_byte_size(size_t sz) {
+    return (sz + (size_t) GrainBytes - 1) &
+                                      ~((1 << (size_t) LogOfHRGrainBytes) - 1);
+  }
+
   // It sets up the heap region size (GrainBytes / GrainWords), as
   // well as other related fields that are based on the heap region
   // size (LogOfHRGrainBytes / LogOfHRGrainWords /
@@ -393,8 +390,7 @@
 
   // If this region is a member of a HeapRegionSeq, the index in that
   // sequence, otherwise -1.
-  int hrs_index() const { return _hrs_index; }
-  void set_hrs_index(int index) { _hrs_index = index; }
+  size_t hrs_index() const { return _hrs_index; }
 
   // The number of bytes marked live in the region in the last marking phase.
   size_t marked_bytes()    { return _prev_marked_bytes; }
@@ -497,27 +493,6 @@
     _next_in_special_set = r;
   }
 
-  // True iff it is or has been an allocation region in the current
-  // collection pause.
-  bool is_gc_alloc_region() const {
-    return _is_gc_alloc_region;
-  }
-  void set_is_gc_alloc_region(bool b) {
-    _is_gc_alloc_region = b;
-  }
-  HeapRegion* next_gc_alloc_region() {
-    assert(is_gc_alloc_region(), "should only invoke on member of CS.");
-    assert(_next_in_special_set == NULL ||
-           _next_in_special_set->is_gc_alloc_region(),
-           "Malformed CS.");
-    return _next_in_special_set;
-  }
-  void set_next_gc_alloc_region(HeapRegion* r) {
-    assert(is_gc_alloc_region(), "should only invoke on member of CS.");
-    assert(r == NULL || r->is_gc_alloc_region(), "Malformed CS.");
-    _next_in_special_set = r;
-  }
-
   // Methods used by the HeapRegionSetBase class and subclasses.
 
   // Getter and setter for the next field used to link regions into
@@ -579,6 +554,8 @@
   void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
   bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }
 
+  HeapWord* orig_end() { return _orig_end; }
+
   // Allows logical separation between objects allocated before and after.
   void save_marks();
 
@@ -596,40 +573,14 @@
   // allocated in the current region before the last call to "save_mark".
   void oop_before_save_marks_iterate(OopClosure* cl);
 
-  // This call determines the "filter kind" argument that will be used for
-  // the next call to "new_dcto_cl" on this region with the "traditional"
-  // signature (i.e., the call below.)  The default, in the absence of a
-  // preceding call to this method, is "NoFilterKind", and a call to this
-  // method is necessary for each such call, or else it reverts to the
-  // default.
-  // (This is really ugly, but all other methods I could think of changed a
-  // lot of main-line code for G1.)
-  void set_next_filter_kind(HeapRegionDCTOC::FilterKind nfk) {
-    _next_fk = nfk;
-  }
-
   DirtyCardToOopClosure*
   new_dcto_closure(OopClosure* cl,
                    CardTableModRefBS::PrecisionStyle precision,
                    HeapRegionDCTOC::FilterKind fk);
 
-#if WHASSUP
-  DirtyCardToOopClosure*
-  new_dcto_closure(OopClosure* cl,
-                   CardTableModRefBS::PrecisionStyle precision,
-                   HeapWord* boundary) {
-    assert(boundary == NULL, "This arg doesn't make sense here.");
-    DirtyCardToOopClosure* res = new_dcto_closure(cl, precision, _next_fk);
-    _next_fk = HeapRegionDCTOC::NoFilterKind;
-    return res;
-  }
-#endif
-
-  //
   // Note the start or end of marking. This tells the heap region
   // that the collector is about to start or has finished (concurrently)
   // marking the heap.
-  //
 
   // Note the start of a marking phase. Record the
   // start of the unmarked area of the region here.
@@ -853,14 +804,20 @@
   void print() const;
   void print_on(outputStream* st) const;
 
-  // use_prev_marking == true  -> use "prev" marking information,
-  // use_prev_marking == false -> use "next" marking information
+  // vo == UsePrevMarking -> use "prev" marking information,
+  // vo == UseNextMarking -> use "next" marking information,
+  // vo == UseMarkWord    -> use the mark word in the object header
+  //
   // NOTE: Only the "prev" marking information is guaranteed to be
   // consistent most of the time, so most calls to this should use
-  // use_prev_marking == true. Currently, there is only one case where
-  // this is called with use_prev_marking == false, which is to verify
-  // the "next" marking information at the end of remark.
-  void verify(bool allow_dirty, bool use_prev_marking, bool *failures) const;
+  // vo == UsePrevMarking.
+  // Currently, there is only one case where this is called with
+  // vo == UseNextMarking, which is to verify the "next" marking
+  // information at the end of remark.
+  // Currently there is only one place where this is called with
+  // vo == UseMarkWord, which is to verify the marking during a
+  // full GC.
+  void verify(bool allow_dirty, VerifyOption vo, bool *failures) const;
 
   // Override; it uses the "prev" marking information
   virtual void verify(bool allow_dirty) const;
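
As a quick sanity check of align_up_to_region_byte_size() introduced above, the
sketch below reproduces the rounding with assumed values (GrainBytes = 1 MB,
LogOfHRGrainBytes = 20): exact multiples are unchanged and anything else is
rounded up to the next region boundary.

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t GrainBytes = size_t(1) << 20;   // assumed 1 MB regions
      const size_t LogOfHRGrainBytes = 20;
      auto align_up = [&](size_t sz) {
        return (sz + GrainBytes - 1) & ~((size_t(1) << LogOfHRGrainBytes) - 1);
      };
      assert(align_up(5 * GrainBytes) == 5 * GrainBytes);      // already aligned
      assert(align_up(5 * GrainBytes + 1) == 6 * GrainBytes);  // rounds up
      return 0;
    }
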
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -364,7 +364,10 @@
   PosParPRT** next_addr() { return &_next; }
 
   bool should_expand(int tid) {
-    return par_tables() == NULL && tid > 0 && hr()->is_gc_alloc_region();
+    // Given that we now defer RSet updates until after a GC, we don't
+    // really need to expand the tables any more. This code should be
+    // cleaned up in the future (see CR 6921087).
+    return false;
   }
 
   void par_expand() {
@@ -834,7 +837,7 @@
 #endif
 
   // Set the corresponding coarse bit.
-  int max_hrs_index = max->hr()->hrs_index();
+  size_t max_hrs_index = max->hr()->hrs_index();
   if (!_coarse_map.at(max_hrs_index)) {
     _coarse_map.at_put(max_hrs_index, true);
     _n_coarse_entries++;
@@ -860,7 +863,8 @@
                               BitMap* region_bm, BitMap* card_bm) {
   // First eliminated garbage regions from the coarse map.
   if (G1RSScrubVerbose)
-    gclog_or_tty->print_cr("Scrubbing region %d:", hr()->hrs_index());
+    gclog_or_tty->print_cr("Scrubbing region "SIZE_FORMAT":",
+                           hr()->hrs_index());
 
   assert(_coarse_map.size() == region_bm->size(), "Precondition");
   if (G1RSScrubVerbose)
@@ -878,7 +882,8 @@
       PosParPRT* nxt = cur->next();
       // If the entire region is dead, eliminate.
       if (G1RSScrubVerbose)
-        gclog_or_tty->print_cr("     For other region %d:", cur->hr()->hrs_index());
+        gclog_or_tty->print_cr("     For other region "SIZE_FORMAT":",
+                               cur->hr()->hrs_index());
       if (!region_bm->at(cur->hr()->hrs_index())) {
         *prev = nxt;
         cur->set_next(NULL);
@@ -994,7 +999,7 @@
 
 void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
   MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
-  size_t hrs_ind = (size_t)from_hr->hrs_index();
+  size_t hrs_ind = from_hr->hrs_index();
   size_t ind = hrs_ind & _mod_max_fine_entries_mask;
   if (del_single_region_table(ind, from_hr)) {
     assert(!_coarse_map.at(hrs_ind), "Inv");
@@ -1002,7 +1007,7 @@
     _coarse_map.par_at_put(hrs_ind, 0);
   }
   // Check to see if any of the fcc entries come from here.
-  int hr_ind = hr()->hrs_index();
+  size_t hr_ind = hr()->hrs_index();
   for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
     int fcc_ent = _from_card_cache[tid][hr_ind];
     if (fcc_ent != -1) {
@@ -1083,8 +1088,9 @@
 
 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                                    HeapRegion* hr)
-  : _bosa(bosa), _other_regions(hr), _iter_state(Unclaimed) { }
-
+  : _bosa(bosa), _other_regions(hr) {
+  reset_for_par_iteration();
+}
 
 void HeapRegionRemSet::setup_remset_size() {
   // Setup sparse and fine-grain tables sizes.
@@ -1099,10 +1105,6 @@
   guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
 }
 
-void HeapRegionRemSet::init_for_par_iteration() {
-  _iter_state = Unclaimed;
-}
-
 bool HeapRegionRemSet::claim_iter() {
   if (_iter_state != Unclaimed) return false;
   jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed);
@@ -1117,7 +1119,6 @@
   return _iter_state == Complete;
 }
 
-
 void HeapRegionRemSet::init_iterator(HeapRegionRemSetIterator* iter) const {
   iter->initialize(this);
 }
@@ -1130,7 +1131,7 @@
   while (iter.has_next(card_index)) {
     HeapWord* card_start =
       G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
-    gclog_or_tty->print_cr("  Card " PTR_FORMAT ".", card_start);
+    gclog_or_tty->print_cr("  Card " PTR_FORMAT, card_start);
   }
   // XXX
   if (iter.n_yielded() != occupied()) {
@@ -1157,6 +1158,14 @@
 void HeapRegionRemSet::clear() {
   _other_regions.clear();
   assert(occupied() == 0, "Should be clear.");
+  reset_for_par_iteration();
+}
+
+void HeapRegionRemSet::reset_for_par_iteration() {
+  _iter_state = Unclaimed;
+  _iter_claimed = 0;
+  // It's good to check this to make sure that the two methods are in sync.
+  assert(verify_ready_for_par_iteration(), "post-condition");
 }
 
 void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -262,8 +262,6 @@
   virtual void cleanup() = 0;
 #endif
 
-  // Should be called from single-threaded code.
-  void init_for_par_iteration();
   // Attempt to claim the region.  Returns true iff this call caused an
   // atomic transition from Unclaimed to Claimed.
   bool claim_iter();
@@ -273,7 +271,6 @@
   bool iter_is_complete();
 
   // Support for claiming blocks of cards during iteration
-  void set_iter_claimed(size_t x) { _iter_claimed = (jlong)x; }
   size_t iter_claimed() const { return (size_t)_iter_claimed; }
   // Claim the next block of cards
   size_t iter_claimed_next(size_t step) {
@@ -284,6 +281,11 @@
     } while (Atomic::cmpxchg((jlong)next, &_iter_claimed, (jlong)current) != (jlong)current);
     return current;
   }
+  void reset_for_par_iteration();
+
+  bool verify_ready_for_par_iteration() {
+    return (_iter_state == Unclaimed) && (_iter_claimed == 0);
+  }
 
   // Initialize the given iterator to iterate over this rem set.
   void init_iterator(HeapRegionRemSetIterator* iter) const;
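
The reset_for_par_iteration()/claim_iter()/iter_claimed_next() protocol above reduces to two lock-free claiming idioms. The following is a minimal, self-contained sketch of those idioms using std::atomic in place of HotSpot's Atomic::cmpxchg; the CardScanner class and its members are hypothetical stand-ins, not HotSpot code.

#include <atomic>
#include <cstddef>

class CardScanner {
  std::atomic<int>    _iter_state;    // 0 = Unclaimed, 1 = Claimed
  std::atomic<size_t> _iter_claimed;  // next unclaimed card index
public:
  CardScanner() : _iter_state(0), _iter_claimed(0) {}

  // Analogous to claim_iter(): only the worker whose compare-exchange
  // succeeds owns the iteration; everyone else gets false.
  bool claim_iter() {
    int expected = 0;
    return _iter_state.compare_exchange_strong(expected, 1);
  }

  // Analogous to iter_claimed_next(step): hand out the half-open block
  // [current, current + step) to the caller; the CAS loop keeps the
  // hand-outs disjoint under contention.
  size_t iter_claimed_next(size_t step) {
    size_t current = _iter_claimed.load();
    while (!_iter_claimed.compare_exchange_weak(current, current + step)) {
      // current is reloaded with the observed value on failure
    }
    return current;
  }
};

A worker would either call claim_iter() once to take the whole remembered set, or repeatedly call iter_claimed_next(block_size) and scan one block of cards per successful claim until the returned start index passes the end of the set.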
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -23,259 +23,182 @@
  */
 
 #include "precompiled.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionSets.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
-#include "gc_implementation/g1/heapRegionSeq.hpp"
 #include "memory/allocation.hpp"
 
-// Local to this file.
-
-static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
-  if ((*hr1p)->end() <= (*hr2p)->bottom()) return -1;
-  else if ((*hr2p)->end() <= (*hr1p)->bottom()) return 1;
-  else if (*hr1p == *hr2p) return 0;
-  else {
-    assert(false, "We should never compare distinct overlapping regions.");
-  }
-  return 0;
-}
-
-HeapRegionSeq::HeapRegionSeq(const size_t max_size) :
-  _alloc_search_start(0),
-  // The line below is the worst bit of C++ hackery I've ever written
-  // (Detlefs, 11/23).  You should think of it as equivalent to
-  // "_regions(100, true)": initialize the growable array and inform it
-  // that it should allocate its elem array(s) on the C heap.
-  //
-  // The first argument, however, is actually a comma expression
-  // (set_allocation_type(this, C_HEAP), 100). The purpose of the
-  // set_allocation_type() call is to replace the default allocation
-  // type for embedded objects STACK_OR_EMBEDDED with C_HEAP. It will
-  // allow to pass the assert in GenericGrowableArray() which checks
-  // that a growable array object must be on C heap if elements are.
-  //
-  // Note: containing object is allocated on C heap since it is CHeapObj.
-  //
-  _regions((ResourceObj::set_allocation_type((address)&_regions,
-                                             ResourceObj::C_HEAP),
-            (int)max_size),
-           true),
-  _next_rr_candidate(0),
-  _seq_bottom(NULL)
-{}
-
-// Private methods.
+// Private
 
-void HeapRegionSeq::print_empty_runs() {
-  int empty_run = 0;
-  int n_empty = 0;
-  int empty_run_start;
-  for (int i = 0; i < _regions.length(); i++) {
-    HeapRegion* r = _regions.at(i);
-    if (r->continuesHumongous()) continue;
-    if (r->is_empty()) {
-      assert(!r->isHumongous(), "H regions should not be empty.");
-      if (empty_run == 0) empty_run_start = i;
-      empty_run++;
-      n_empty++;
-    } else {
-      if (empty_run > 0) {
-        gclog_or_tty->print("  %d:%d", empty_run_start, empty_run);
-        empty_run = 0;
-      }
-    }
-  }
-  if (empty_run > 0) {
-    gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
-  }
-  gclog_or_tty->print_cr(" [tot = %d]", n_empty);
-}
-
-int HeapRegionSeq::find(HeapRegion* hr) {
-  // FIXME: optimized for adjacent regions of fixed size.
-  int ind = hr->hrs_index();
-  if (ind != -1) {
-    assert(_regions.at(ind) == hr, "Mismatch");
-  }
-  return ind;
-}
-
-
-// Public methods.
+size_t HeapRegionSeq::find_contiguous_from(size_t from, size_t num) {
+  size_t len = length();
+  assert(num > 1, "use this only for sequences of length 2 or greater");
+  assert(from <= len,
+         err_msg("from: "SIZE_FORMAT" should be valid and <= than "SIZE_FORMAT,
+                 from, len));
 
-void HeapRegionSeq::insert(HeapRegion* hr) {
-  assert(!_regions.is_full(), "Too many elements in HeapRegionSeq");
-  if (_regions.length() == 0
-      || _regions.top()->end() <= hr->bottom()) {
-    hr->set_hrs_index(_regions.length());
-    _regions.append(hr);
-  } else {
-    _regions.append(hr);
-    _regions.sort(orderRegions);
-    for (int i = 0; i < _regions.length(); i++) {
-      _regions.at(i)->set_hrs_index(i);
-    }
-  }
-  char* bot = (char*)_regions.at(0)->bottom();
-  if (_seq_bottom == NULL || bot < _seq_bottom) _seq_bottom = bot;
-}
-
-size_t HeapRegionSeq::length() {
-  return _regions.length();
-}
-
-size_t HeapRegionSeq::free_suffix() {
-  size_t res = 0;
-  int first = _regions.length() - 1;
-  int cur = first;
-  while (cur >= 0 &&
-         (_regions.at(cur)->is_empty()
-          && (first == cur
-              || (_regions.at(cur+1)->bottom() ==
-                  _regions.at(cur)->end())))) {
-      res++;
-      cur--;
-  }
-  return res;
-}
-
-int HeapRegionSeq::find_contiguous_from(int from, size_t num) {
-  assert(num > 1, "pre-condition");
-  assert(0 <= from && from <= _regions.length(),
-         err_msg("from: %d should be valid and <= than %d",
-                 from, _regions.length()));
-
-  int curr = from;
-  int first = -1;
+  size_t curr = from;
+  size_t first = G1_NULL_HRS_INDEX;
   size_t num_so_far = 0;
-  while (curr < _regions.length() && num_so_far < num) {
-    HeapRegion* curr_hr = _regions.at(curr);
-    if (curr_hr->is_empty()) {
-      if (first == -1) {
+  while (curr < len && num_so_far < num) {
+    if (at(curr)->is_empty()) {
+      if (first == G1_NULL_HRS_INDEX) {
         first = curr;
         num_so_far = 1;
       } else {
         num_so_far += 1;
       }
     } else {
-      first = -1;
+      first = G1_NULL_HRS_INDEX;
       num_so_far = 0;
     }
     curr += 1;
   }
-
   assert(num_so_far <= num, "post-condition");
   if (num_so_far == num) {
     // we found enough space for the humongous object
-    assert(from <= first && first < _regions.length(), "post-condition");
-    assert(first < curr && (curr - first) == (int) num, "post-condition");
-    for (int i = first; i < first + (int) num; ++i) {
-      assert(_regions.at(i)->is_empty(), "post-condition");
+    assert(from <= first && first < len, "post-condition");
+    assert(first < curr && (curr - first) == num, "post-condition");
+    for (size_t i = first; i < first + num; ++i) {
+      assert(at(i)->is_empty(), "post-condition");
     }
     return first;
   } else {
     // we failed to find enough space for the humongous object
-    return -1;
+    return G1_NULL_HRS_INDEX;
   }
 }
 
-int HeapRegionSeq::find_contiguous(size_t num) {
-  assert(num > 1, "otherwise we should not be calling this");
-  assert(0 <= _alloc_search_start && _alloc_search_start <= _regions.length(),
-         err_msg("_alloc_search_start: %d should be valid and <= than %d",
-                 _alloc_search_start, _regions.length()));
+// Public
+
+void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end,
+                               size_t max_length) {
+  assert((size_t) bottom % HeapRegion::GrainBytes == 0,
+         "bottom should be heap region aligned");
+  assert((size_t) end % HeapRegion::GrainBytes == 0,
+         "end should be heap region aligned");
+
+  _length = 0;
+  _heap_bottom = bottom;
+  _heap_end = end;
+  _region_shift = HeapRegion::LogOfHRGrainBytes;
+  _next_search_index = 0;
+  _allocated_length = 0;
+  _max_length = max_length;
+
+  _regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length);
+  memset(_regions, 0, max_length * sizeof(HeapRegion*));
+  _regions_biased = _regions - ((size_t) bottom >> _region_shift);
+
+  assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)],
+         "bottom should be included in the region with index 0");
+}
+
+MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
+                                   HeapWord* new_end,
+                                   FreeRegionList* list) {
+  assert(old_end < new_end, "don't call it otherwise");
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+  HeapWord* next_bottom = old_end;
+  assert(_heap_bottom <= next_bottom, "invariant");
+  while (next_bottom < new_end) {
+    assert(next_bottom < _heap_end, "invariant");
+    size_t index = length();
 
-  int start = _alloc_search_start;
-  int res = find_contiguous_from(start, num);
-  if (res == -1 && start != 0) {
-    // Try starting from the beginning. If _alloc_search_start was 0,
-    // no point in doing this again.
-    res = find_contiguous_from(0, num);
+    assert(index < _max_length, "otherwise we cannot expand further");
+    if (index == 0) {
+      // We have not allocated any regions so far
+      assert(next_bottom == _heap_bottom, "invariant");
+    } else {
+      // next_bottom should match the end of the last/previous region
+      assert(next_bottom == at(index - 1)->end(), "invariant");
+    }
+
+    if (index == _allocated_length) {
+      // We have to allocate a new HeapRegion.
+      HeapRegion* new_hr = g1h->new_heap_region(index, next_bottom);
+      if (new_hr == NULL) {
+        // allocation failed, we bail out and return what we have done so far
+        return MemRegion(old_end, next_bottom);
+      }
+      assert(_regions[index] == NULL, "invariant");
+      _regions[index] = new_hr;
+      increment_length(&_allocated_length);
+    }
+    // Have to increment the length first, otherwise we will get an
+    // assert failure in at(index) below.
+    increment_length(&_length);
+    HeapRegion* hr = at(index);
+    list->add_as_tail(hr);
+
+    next_bottom = hr->end();
   }
-  if (res != -1) {
-    assert(0 <= res && res < _regions.length(),
-           err_msg("res: %d should be valid", res));
-    _alloc_search_start = res + (int) num;
-    assert(0 < _alloc_search_start && _alloc_search_start <= _regions.length(),
-           err_msg("_alloc_search_start: %d should be valid",
-                   _alloc_search_start));
+  assert(next_bottom == new_end, "post-condition");
+  return MemRegion(old_end, next_bottom);
+}
+
+size_t HeapRegionSeq::free_suffix() {
+  size_t res = 0;
+  size_t index = length();
+  while (index > 0) {
+    index -= 1;
+    if (!at(index)->is_empty()) {
+      break;
+    }
+    res += 1;
   }
   return res;
 }
 
-void HeapRegionSeq::iterate(HeapRegionClosure* blk) {
-  iterate_from((HeapRegion*)NULL, blk);
+size_t HeapRegionSeq::find_contiguous(size_t num) {
+  assert(num > 1, "use this only for sequences of length 2 or greater");
+  assert(_next_search_index <= length(),
+         err_msg("_next_search_indeex: "SIZE_FORMAT" "
+                 "should be valid and <= than "SIZE_FORMAT,
+                 _next_search_index, length()));
+
+  size_t start = _next_search_index;
+  size_t res = find_contiguous_from(start, num);
+  if (res == G1_NULL_HRS_INDEX && start > 0) {
+    // Try starting from the beginning. If _next_search_index was 0,
+    // no point in doing this again.
+    res = find_contiguous_from(0, num);
+  }
+  if (res != G1_NULL_HRS_INDEX) {
+    assert(res < length(),
+           err_msg("res: "SIZE_FORMAT" should be valid", res));
+    _next_search_index = res + num;
+    assert(_next_search_index <= length(),
+           err_msg("_next_search_indeex: "SIZE_FORMAT" "
+                   "should be valid and <= than "SIZE_FORMAT,
+                   _next_search_index, length()));
+  }
+  return res;
 }
 
-// The first argument r is the heap region at which iteration begins.
-// This operation runs fastest when r is NULL, or the heap region for
-// which a HeapRegionClosure most recently returned true, or the
-// heap region immediately to its right in the sequence.  In all
-// other cases a linear search is required to find the index of r.
-
-void HeapRegionSeq::iterate_from(HeapRegion* r, HeapRegionClosure* blk) {
-
-  // :::: FIXME ::::
-  // Static cache value is bad, especially when we start doing parallel
-  // remembered set update. For now just don't cache anything (the
-  // code in the def'd out blocks).
+void HeapRegionSeq::iterate(HeapRegionClosure* blk) const {
+  iterate_from((HeapRegion*) NULL, blk);
+}
 
-#if 0
-  static int cached_j = 0;
-#endif
-  int len = _regions.length();
-  int j = 0;
-  // Find the index of r.
-  if (r != NULL) {
-#if 0
-    assert(cached_j >= 0, "Invariant.");
-    if ((cached_j < len) && (r == _regions.at(cached_j))) {
-      j = cached_j;
-    } else if ((cached_j + 1 < len) && (r == _regions.at(cached_j + 1))) {
-      j = cached_j + 1;
-    } else {
-      j = find(r);
-#endif
-      if (j < 0) {
-        j = 0;
-      }
-#if 0
-    }
-#endif
+void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const {
+  size_t hr_index = 0;
+  if (hr != NULL) {
+    hr_index = (size_t) hr->hrs_index();
   }
-  int i;
-  for (i = j; i < len; i += 1) {
-    int res = blk->doHeapRegion(_regions.at(i));
+
+  size_t len = length();
+  for (size_t i = hr_index; i < len; i += 1) {
+    bool res = blk->doHeapRegion(at(i));
     if (res) {
-#if 0
-      cached_j = i;
-#endif
       blk->incomplete();
       return;
     }
   }
-  for (i = 0; i < j; i += 1) {
-    int res = blk->doHeapRegion(_regions.at(i));
+  for (size_t i = 0; i < hr_index; i += 1) {
+    bool res = blk->doHeapRegion(at(i));
     if (res) {
-#if 0
-      cached_j = i;
-#endif
-      blk->incomplete();
-      return;
-    }
-  }
-}
-
-void HeapRegionSeq::iterate_from(int idx, HeapRegionClosure* blk) {
-  int len = _regions.length();
-  int i;
-  for (i = idx; i < len; i++) {
-    if (blk->doHeapRegion(_regions.at(i))) {
-      blk->incomplete();
-      return;
-    }
-  }
-  for (i = 0; i < idx; i++) {
-    if (blk->doHeapRegion(_regions.at(i))) {
       blk->incomplete();
       return;
     }
@@ -283,54 +206,92 @@
 }
 
 MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
-                                   size_t& num_regions_deleted) {
+                                   size_t* num_regions_deleted) {
   // Reset this in case it's currently pointing into the regions that
   // we just removed.
-  _alloc_search_start = 0;
+  _next_search_index = 0;
 
   assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
   assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");
+  assert(length() > 0, "the region sequence should not be empty");
+  assert(length() <= _allocated_length, "invariant");
+  assert(_allocated_length > 0, "we should have at least one region committed");
 
-  if (_regions.length() == 0) {
-    num_regions_deleted = 0;
-    return MemRegion();
-  }
-  int j = _regions.length() - 1;
-  HeapWord* end = _regions.at(j)->end();
+  // around the loop, i will be the next region to be removed
+  size_t i = length() - 1;
+  assert(i > 0, "we should never remove all regions");
+  // [last_start, end) is the MemRegion that covers the regions we will remove.
+  HeapWord* end = at(i)->end();
   HeapWord* last_start = end;
-  while (j >= 0 && shrink_bytes > 0) {
-    HeapRegion* cur = _regions.at(j);
-    // We have to leave humongous regions where they are,
-    // and work around them.
-    if (cur->isHumongous()) {
-      return MemRegion(last_start, end);
-    }
-    assert(cur == _regions.top(), "Should be top");
+  *num_regions_deleted = 0;
+  while (shrink_bytes > 0) {
+    HeapRegion* cur = at(i);
+    // We should leave the humongous regions where they are.
+    if (cur->isHumongous()) break;
+    // We should stop shrinking if we come across a non-empty region.
     if (!cur->is_empty()) break;
+
+    i -= 1;
+    *num_regions_deleted += 1;
     shrink_bytes -= cur->capacity();
-    num_regions_deleted++;
-    _regions.pop();
     last_start = cur->bottom();
-    // We need to delete these somehow, but can't currently do so here: if
-    // we do, the ZF thread may still access the deleted region.  We'll
-    // leave this here as a reminder that we have to do something about
-    // this.
-    // delete cur;
-    j--;
+    decrement_length(&_length);
+    // We will reclaim the HeapRegion. _allocated_length should be
+    // covering this index. So, even though we removed the region from
+    // the active set by decreasing _length, we still have it
+    // available in the future if we need to re-use it.
+    assert(i > 0, "we should never remove all regions");
+    assert(length() > 0, "we should never remove all regions");
   }
   return MemRegion(last_start, end);
 }
 
-class PrintHeapRegionClosure : public  HeapRegionClosure {
-public:
-  bool doHeapRegion(HeapRegion* r) {
-    gclog_or_tty->print(PTR_FORMAT ":", r);
-    r->print();
-    return false;
+#ifndef PRODUCT
+void HeapRegionSeq::verify_optional() {
+  guarantee(_length <= _allocated_length,
+            err_msg("invariant: _length: "SIZE_FORMAT" "
+                    "_allocated_length: "SIZE_FORMAT,
+                    _length, _allocated_length));
+  guarantee(_allocated_length <= _max_length,
+            err_msg("invariant: _allocated_length: "SIZE_FORMAT" "
+                    "_max_length: "SIZE_FORMAT,
+                    _allocated_length, _max_length));
+  guarantee(_next_search_index <= _length,
+            err_msg("invariant: _next_search_index: "SIZE_FORMAT" "
+                    "_length: "SIZE_FORMAT,
+                    _next_search_index, _length));
+
+  HeapWord* prev_end = _heap_bottom;
+  for (size_t i = 0; i < _allocated_length; i += 1) {
+    HeapRegion* hr = _regions[i];
+    guarantee(hr != NULL, err_msg("invariant: i: "SIZE_FORMAT, i));
+    guarantee(hr->bottom() == prev_end,
+              err_msg("invariant i: "SIZE_FORMAT" "HR_FORMAT" "
+                      "prev_end: "PTR_FORMAT,
+                      i, HR_FORMAT_PARAMS(hr), prev_end));
+    guarantee(hr->hrs_index() == i,
+              err_msg("invariant: i: "SIZE_FORMAT" hrs_index(): "SIZE_FORMAT,
+                      i, hr->hrs_index()));
+    if (i < _length) {
+      // Asserts will fire if i is >= _length
+      HeapWord* addr = hr->bottom();
+      guarantee(addr_to_region(addr) == hr, "sanity");
+      guarantee(addr_to_region_unsafe(addr) == hr, "sanity");
+    } else {
+      guarantee(hr->is_empty(), "sanity");
+      guarantee(!hr->isHumongous(), "sanity");
+      // using assert instead of guarantee here since containing_set()
+      // is only available in non-product builds.
+      assert(hr->containing_set() == NULL, "sanity");
+    }
+    if (hr->startsHumongous()) {
+      prev_end = hr->orig_end();
+    } else {
+      prev_end = hr->end();
+    }
   }
-};
-
-void HeapRegionSeq::print() {
-  PrintHeapRegionClosure cl;
-  iterate(&cl);
+  for (size_t i = _allocated_length; i < _max_length; i += 1) {
+    guarantee(_regions[i] == NULL, err_msg("invariant i: "SIZE_FORMAT, i));
+  }
 }
+#endif // PRODUCT
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -25,92 +25,144 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
 
-#include "gc_implementation/g1/heapRegion.hpp"
-#include "utilities/growableArray.hpp"
-
 class HeapRegion;
 class HeapRegionClosure;
+class FreeRegionList;
+
+#define G1_NULL_HRS_INDEX ((size_t) -1)
+
+// This class keeps track of the region metadata (i.e., HeapRegion
+// instances). They are kept in the _regions array in address
+// order. A region's index in the array corresponds to its index in
+// the heap (i.e., 0 is the region at the bottom of the heap, 1 is
+// the one after it, etc.). Two regions that are consecutive in the
+// array should also be adjacent in the address space (i.e.,
+// region(i).end() == region(i+1).bottom()).
+//
+// We create a HeapRegion when we commit the region's address space
+// for the first time. When we uncommit the address space of a
+// region we retain the HeapRegion to be able to re-use it in the
+// future (in case we recommit it).
+//
+// We keep track of three lengths:
+//
+// * _length (returned by length()) is the number of currently
+//   committed regions.
+// * _allocated_length (not exposed outside this class) is the
+//   number of regions for which we have HeapRegions.
+// * _max_length (returned by max_length()) is the maximum number of
+//   regions the heap can have.
+//
+// and maintain that: _length <= _allocated_length <= _max_length
 
 class HeapRegionSeq: public CHeapObj {
+  friend class VMStructs;
 
-  // _regions is kept sorted by start address order, and no two regions are
-  // overlapping.
-  GrowableArray<HeapRegion*> _regions;
+  // The array that holds the HeapRegions.
+  HeapRegion** _regions;
+
+  // Version of _regions biased to address 0
+  HeapRegion** _regions_biased;
+
+  // The number of regions committed in the heap.
+  size_t _length;
 
-  // The index in "_regions" at which to start the next allocation search.
-  // (For efficiency only; private to obj_allocate after initialization.)
-  int _alloc_search_start;
+  // The address of the first reserved word in the heap.
+  HeapWord* _heap_bottom;
+
+  // The address of the last reserved word in the heap - 1.
+  HeapWord* _heap_end;
+
+  // The log of the region byte size.
+  size_t _region_shift;
+
+  // A hint for which index to start searching from for humongous
+  // allocations.
+  size_t _next_search_index;
 
-  // Finds a contiguous set of empty regions of length num, starting
-  // from a given index.
-  int find_contiguous_from(int from, size_t num);
+  // The number of regions for which we have allocated HeapRegions.
+  size_t _allocated_length;
+
+  // The maximum number of regions in the heap.
+  size_t _max_length;
+
+  // Find a contiguous set of empty regions of length num, starting
+  // from the given index.
+  size_t find_contiguous_from(size_t from, size_t num);
 
-  // Currently, we're choosing collection sets in a round-robin fashion,
-  // starting here.
-  int _next_rr_candidate;
+  // Map a heap address to a biased region index. Assume that the
+  // address is valid.
+  inline size_t addr_to_index_biased(HeapWord* addr) const;
 
-  // The bottom address of the bottom-most region, or else NULL if there
-  // are no regions in the sequence.
-  char* _seq_bottom;
+  void increment_length(size_t* length) {
+    assert(*length < _max_length, "pre-condition");
+    *length += 1;
+  }
+
+  void decrement_length(size_t* length) {
+    assert(*length > 0, "pre-condition");
+    *length -= 1;
+  }
 
  public:
-  // Initializes "this" to the empty sequence of regions.
-  HeapRegionSeq(const size_t max_size);
+  // Empty constructor; we'll initialize it with the initialize() method.
+  HeapRegionSeq() { }
+
+  void initialize(HeapWord* bottom, HeapWord* end, size_t max_length);
 
-  // Adds "hr" to "this" sequence.  Requires "hr" not to overlap with
-  // any region already in "this".  (Will perform better if regions are
-  // inserted in ascending address order.)
-  void insert(HeapRegion* hr);
+  // Return the HeapRegion at the given index. Assume that the index
+  // is valid.
+  inline HeapRegion* at(size_t index) const;
+
+  // If addr is within the committed space return its corresponding
+  // HeapRegion, otherwise return NULL.
+  inline HeapRegion* addr_to_region(HeapWord* addr) const;
+
+  // Return the HeapRegion that corresponds to the given
+  // address. Assume the address is valid.
+  inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;
 
-  // Given a HeapRegion*, returns its index within _regions,
-  // or returns -1 if not found.
-  int find(HeapRegion* hr);
+  // Return the number of regions that have been committed in the heap.
+  size_t length() const { return _length; }
+
+  // Return the maximum number of regions in the heap.
+  size_t max_length() const { return _max_length; }
 
-  // Requires the index to be valid, and return the region at the index.
-  HeapRegion* at(size_t i) { return _regions.at((int)i); }
+  // Expand the sequence to reflect that the heap has grown from
+  // old_end to new_end. Either create new HeapRegions, or re-use
+  // existing ones, and return them in the given list. Returns the
+  // memory region that covers the newly-created regions. If a
+  // HeapRegion allocation fails, the result memory region might be
+  // smaller than the desired one.
+  MemRegion expand_by(HeapWord* old_end, HeapWord* new_end,
+                      FreeRegionList* list);
 
-  // Return the number of regions in the sequence.
-  size_t length();
-
-  // Returns the number of contiguous regions at the end of the sequence
+  // Return the number of contiguous regions at the end of the sequence
   // that are available for allocation.
   size_t free_suffix();
 
   // Find a contiguous set of empty regions of length num and return
-  // the index of the first region or -1 if the search was unsuccessful.
-  int find_contiguous(size_t num);
+  // the index of the first region or G1_NULL_HRS_INDEX if the
+  // search was unsuccessful.
+  size_t find_contiguous(size_t num);
 
-  // Apply the "doHeapRegion" method of "blk" to all regions in "this",
-  // in address order, terminating the iteration early
-  // if the "doHeapRegion" method returns "true".
-  void iterate(HeapRegionClosure* blk);
-
-  // Apply the "doHeapRegion" method of "blk" to all regions in "this",
-  // starting at "r" (or first region, if "r" is NULL), in a circular
-  // manner, terminating the iteration early if the "doHeapRegion" method
-  // returns "true".
-  void iterate_from(HeapRegion* r, HeapRegionClosure* blk);
+  // Apply blk->doHeapRegion() on all committed regions in address order,
+  // terminating the iteration early if doHeapRegion() returns true.
+  void iterate(HeapRegionClosure* blk) const;
 
-  // As above, but start from a given index in the sequence
-  // instead of a given heap region.
-  void iterate_from(int idx, HeapRegionClosure* blk);
+  // As above, but start the iteration from hr and loop around. If hr
+  // is NULL, we start from the first region in the heap.
+  void iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const;
 
-  // Requires "shrink_bytes" to be a multiple of the page size and heap
-  // region granularity.  Deletes as many "rightmost" completely free heap
-  // regions from the sequence as comprise shrink_bytes bytes.  Returns the
-  // MemRegion indicating the region those regions comprised, and sets
-  // "num_regions_deleted" to the number of regions deleted.
-  MemRegion shrink_by(size_t shrink_bytes, size_t& num_regions_deleted);
+  // Tag as uncommitted as many completely free regions as possible,
+  // up to shrink_bytes, from the suffix of the committed
+  // sequence. Return a MemRegion that corresponds to the address
+  // range of the uncommitted regions. Assume shrink_bytes is page and
+  // heap region aligned.
+  MemRegion shrink_by(size_t shrink_bytes, size_t* num_regions_deleted);
 
-  // If "addr" falls within a region in the sequence, return that region,
-  // or else NULL.
-  inline HeapRegion* addr_to_region(const void* addr);
-
-  void print();
-
-  // Prints out runs of empty regions.
-  void print_empty_runs();
-
+  // Do some sanity checking.
+  void verify_optional() PRODUCT_RETURN;
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
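
The _length <= _allocated_length <= _max_length scheme documented in the class comment above can be illustrated with a toy version. This is only a sketch of the bookkeeping, not HotSpot code: ToyRegionSeq and Region are invented stand-ins, and the real expand_by()/shrink_by() work on byte ranges rather than single regions.

#include <cassert>
#include <cstddef>
#include <vector>

struct Region { size_t index; };

class ToyRegionSeq {
  std::vector<Region*> _regions;   // slots for up to _max_length regions
  size_t _length;                  // committed regions
  size_t _allocated_length;        // regions for which a Region object exists
  size_t _max_length;              // capacity of the sequence
public:
  explicit ToyRegionSeq(size_t max_length)
    : _regions(max_length, nullptr), _length(0),
      _allocated_length(0), _max_length(max_length) {}

  // Commit one more region, re-using a previously created Region if one is
  // available (mirrors expand_by() re-using regions after a shrink).
  Region* expand_by_one() {
    assert(_length < _max_length);
    if (_length == _allocated_length) {
      _regions[_allocated_length] = new Region{_allocated_length};
      _allocated_length += 1;
    }
    return _regions[_length++];
  }

  // Uncommit the last region but keep its metadata around (mirrors shrink_by()).
  void shrink_by_one() {
    assert(_length > 0);
    _length -= 1;
    assert(_length <= _allocated_length && _allocated_length <= _max_length);
  }
};

After a shrink followed by an expand, the same Region object comes back into use, which is exactly the re-use the comment block describes for uncommitted regions.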
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,23 +25,42 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
 
+#include "gc_implementation/g1/heapRegion.hpp"
 #include "gc_implementation/g1/heapRegionSeq.hpp"
 
-inline HeapRegion* HeapRegionSeq::addr_to_region(const void* addr) {
-  assert(_seq_bottom != NULL, "bad _seq_bottom in addr_to_region");
-  if ((char*) addr >= _seq_bottom) {
-    size_t diff = (size_t) pointer_delta((HeapWord*) addr,
-                                         (HeapWord*) _seq_bottom);
-    int index = (int) (diff >> HeapRegion::LogOfHRGrainWords);
-    assert(index >= 0, "invariant / paranoia");
-    if (index < _regions.length()) {
-      HeapRegion* hr = _regions.at(index);
-      assert(hr->is_in_reserved(addr),
-             "addr_to_region is wrong...");
-      return hr;
-    }
+inline size_t HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const {
+  assert(_heap_bottom <= addr && addr < _heap_end,
+         err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
+                 addr, _heap_bottom, _heap_end));
+  size_t index = (size_t) addr >> _region_shift;
+  return index;
+}
+
+inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const {
+  assert(_heap_bottom <= addr && addr < _heap_end,
+         err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
+                 addr, _heap_bottom, _heap_end));
+  size_t index_biased = addr_to_index_biased(addr);
+  HeapRegion* hr = _regions_biased[index_biased];
+  assert(hr != NULL, "invariant");
+  return hr;
+}
+
+inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
+  if (addr != NULL && addr < _heap_end) {
+    assert(addr >= _heap_bottom,
+          err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, _heap_bottom));
+    return addr_to_region_unsafe(addr);
   }
   return NULL;
 }
 
+inline HeapRegion* HeapRegionSeq::at(size_t index) const {
+  assert(index < length(), "pre-condition");
+  HeapRegion* hr = _regions[index];
+  assert(hr != NULL, "sanity");
+  assert(hr->hrs_index() == index, "sanity");
+  return hr;
+}
+
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
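
addr_to_index_biased()/addr_to_region_unsafe() above rely on biasing the region array by (heap_bottom >> shift), so mapping an address to its region is a single shift plus an index, with no subtraction of heap_bottom on the fast path. Below is a small self-contained illustration of the same trick; the fake heap base, the region size, and the int stand-ins for HeapRegion* are made up for the example.

#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  const size_t   kLogRegionBytes = 20;                       // pretend 1 MB regions
  const size_t   kNumRegions     = 8;
  const uint64_t heap_bottom     = uint64_t(1) << 32;        // pretend reservation base
  const uint64_t heap_end        = heap_bottom + (uint64_t(kNumRegions) << kLogRegionBytes);

  int regions[kNumRegions];                                  // stand-ins for HeapRegion*
  for (size_t i = 0; i < kNumRegions; ++i) regions[i] = int(i);

  // The biased base: &regions[0] - (heap_bottom >> shift), so that
  // biased[addr >> shift] lands on the right slot for any addr in the heap.
  // (Deliberately the same kind of out-of-range pointer arithmetic the real
  // code uses; it is never dereferenced outside the valid window.)
  int* regions_biased = regions - (size_t)(heap_bottom >> kLogRegionBytes);

  uint64_t addr = heap_bottom + 3 * (uint64_t(1) << kLogRegionBytes) + 123;
  assert(heap_bottom <= addr && addr < heap_end);
  int region = regions_biased[addr >> kLogRegionBytes];
  assert(region == 3);                                       // address falls in region 3
  return 0;
}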
--- a/src/share/vm/gc_implementation/g1/heapRegionSets.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegionSets.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSets.hpp"
 
 //////////////////// FreeRegionList ////////////////////
@@ -38,6 +39,16 @@
 
 //////////////////// MasterFreeRegionList ////////////////////
 
+const char* MasterFreeRegionList::verify_region_extra(HeapRegion* hr) {
+  // We should reset the RSet for parallel iteration before we add it
+  // to the master free list so that it is ready when the region is
+  // re-allocated.
+  if (!hr->rem_set()->verify_ready_for_par_iteration()) {
+    return "the region's RSet should be ready for parallel iteration";
+  }
+  return FreeRegionList::verify_region_extra(hr);
+}
+
 bool MasterFreeRegionList::check_mt_safety() {
   // Master Free List MT safety protocol:
   // (a) If we're at a safepoint, operations on the master free list
--- a/src/share/vm/gc_implementation/g1/heapRegionSets.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegionSets.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -44,6 +44,7 @@
 
 class MasterFreeRegionList : public FreeRegionList {
 protected:
+  virtual const char* verify_region_extra(HeapRegion* hr);
   virtual bool check_mt_safety();
 
 public:
--- a/src/share/vm/gc_implementation/g1/satbQueue.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/satbQueue.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -29,6 +29,7 @@
 #include "memory/sharedHeap.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/thread.hpp"
+#include "runtime/vmThread.hpp"
 
 // This method removes entries from an SATB buffer that will not be
 // useful to the concurrent marking threads. An entry is removed if it
@@ -252,9 +253,18 @@
       t->satb_mark_queue().apply_closure(_par_closures[worker]);
     }
   }
-  // We'll have worker 0 do this one.
-  if (worker == 0) {
-    shared_satb_queue()->apply_closure(_par_closures[0]);
+
+  // We also need to claim the VMThread so that its parity is updated;
+  // otherwise the next call to Thread::possibly_parallel_oops_do inside
+  // a StrongRootsScope might skip the VMThread because it has a stale
+  // parity that matches the parity set by the StrongRootsScope.
+  //
+  // Whichever worker succeeds in claiming the VMThread gets to do
+  // the shared queue.
+
+  VMThread* vmt = VMThread::vm_thread();
+  if (vmt->claim_oops_do(true, parity)) {
+    shared_satb_queue()->apply_closure(_par_closures[worker]);
   }
 }
 
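
The change above lets whichever worker successfully claims the VMThread process the shared SATB queue, instead of hard-coding worker 0. A generic sketch of that claim-once idea, using a plain atomic flag rather than HotSpot's per-thread oops_do parity; all names here are hypothetical.

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

std::atomic<bool> shared_work_claimed(false);

void worker(int id) {
  // ... per-worker queue processing would go here ...
  bool expected = false;
  if (shared_work_claimed.compare_exchange_strong(expected, true)) {
    // Exactly one worker reaches this branch, regardless of which one.
    std::printf("worker %d processes the shared queue\n", id);
  }
}

int main() {
  std::vector<std::thread> workers;
  for (int i = 0; i < 4; ++i) workers.emplace_back(worker, i);
  for (auto& t : workers) t.join();
  return 0;
}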
--- a/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -481,8 +481,9 @@
 
 bool SparsePRT::add_card(RegionIdx_t region_id, CardIdx_t card_index) {
 #if SPARSE_PRT_VERBOSE
-  gclog_or_tty->print_cr("  Adding card %d from region %d to region %d sparse.",
-                card_index, region_id, _hr->hrs_index());
+  gclog_or_tty->print_cr("  Adding card %d from region %d to region "
+                         SIZE_FORMAT" sparse.",
+                         card_index, region_id, _hr->hrs_index());
 #endif
   if (_next->occupied_entries() * 2 > _next->capacity()) {
     expand();
@@ -533,8 +534,8 @@
   _next = new RSHashTable(last->capacity() * 2);
 
 #if SPARSE_PRT_VERBOSE
-  gclog_or_tty->print_cr("  Expanded sparse table for %d to %d.",
-                _hr->hrs_index(), _next->capacity());
+  gclog_or_tty->print_cr("  Expanded sparse table for "SIZE_FORMAT" to %d.",
+                         _hr->hrs_index(), _next->capacity());
 #endif
   for (size_t i = 0; i < last->capacity(); i++) {
     SparsePRTEntry* e = last->entry((int)i);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP
+
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+
+#define VM_STRUCTS_G1(nonstatic_field, static_field)                          \
+                                                                              \
+  static_field(HeapRegion, GrainBytes, int)                                   \
+                                                                              \
+  nonstatic_field(HeapRegionSeq,   _regions, HeapRegion**)                    \
+  nonstatic_field(HeapRegionSeq,   _length,  size_t)                          \
+                                                                              \
+  nonstatic_field(G1CollectedHeap, _hrs,                HeapRegionSeq)        \
+  nonstatic_field(G1CollectedHeap, _g1_committed,       MemRegion)            \
+  nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t)               \
+  nonstatic_field(G1CollectedHeap, _g1mm,               G1MonitoringSupport*) \
+                                                                              \
+  nonstatic_field(G1MonitoringSupport, _eden_committed,     size_t)           \
+  nonstatic_field(G1MonitoringSupport, _eden_used,          size_t)           \
+  nonstatic_field(G1MonitoringSupport, _survivor_committed, size_t)           \
+  nonstatic_field(G1MonitoringSupport, _survivor_used,      size_t)           \
+  nonstatic_field(G1MonitoringSupport, _old_committed,      size_t)           \
+  nonstatic_field(G1MonitoringSupport, _old_used,           size_t)           \
+
+
+#define VM_TYPES_G1(declare_type, declare_toplevel_type)                      \
+                                                                              \
+  declare_type(G1CollectedHeap, SharedHeap)                                   \
+                                                                              \
+  declare_type(HeapRegion, ContiguousSpace)                                   \
+  declare_toplevel_type(HeapRegionSeq)                                        \
+  declare_toplevel_type(G1MonitoringSupport)                                  \
+                                                                              \
+  declare_toplevel_type(G1CollectedHeap*)                                     \
+  declare_toplevel_type(HeapRegion*)                                          \
+  declare_toplevel_type(G1MonitoringSupport*)                                 \
+
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP
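
VM_STRUCTS_G1 and VM_TYPES_G1 above follow the vmStructs convention of describing fields and types as macro tables that other code expands. The sketch below shows only the general X-macro technique behind that convention, not the actual vmStructs machinery; Heap, FieldDesc, and the macros are invented for illustration.

#include <cstddef>
#include <cstdio>

#define FIELD_LIST(nonstatic_field)            \
  nonstatic_field(Heap, committed, size_t)     \
  nonstatic_field(Heap, used,      size_t)

// First expansion: declare the members.
struct Heap {
#define DECLARE_FIELD(type, name, ftype) ftype name;
  FIELD_LIST(DECLARE_FIELD)
#undef DECLARE_FIELD
};

// Second expansion: a table of (name, offset) descriptors, which is the kind
// of data a debugger-side agent would consume.
struct FieldDesc { const char* name; size_t offset; };
static const FieldDesc heap_fields[] = {
#define DESCRIBE_FIELD(type, name, ftype) { #type "::" #name, offsetof(type, name) },
  FIELD_LIST(DESCRIBE_FIELD)
#undef DESCRIBE_FIELD
};

int main() {
  for (const FieldDesc& f : heap_fields)
    std::printf("%s at offset %zu\n", f.name, f.offset);
  return 0;
}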
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -98,7 +98,19 @@
 
     // At this point we are supposed to start a concurrent cycle. We
     // will do so if one is not already in progress.
-    bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle();
+    bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);
+
+    // The above routine returns true if we were able to force the
+    // next GC pause to be an initial mark; it returns false if a
+    // marking cycle is already in progress.
+    //
+    // If a marking cycle is already in progress just return and skip
+    // the pause - the requesting thread should block in doit_epilogue
+    // until the marking cycle is complete.
+    if (!res) {
+      assert(_word_size == 0, "ExplicitGCInvokesConcurrent shouldn't be allocating");
+      return;
+    }
   }
 
   _pause_succeeded =
--- a/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -348,15 +348,31 @@
         // cleared before we had a chance to examine it. In that case, the value
         // will have been logged in the LNC for that chunk.
         // We need to examine as many chunks to the right as this object
-        // covers.
-        const uintptr_t last_chunk_index_to_check = addr_to_chunk_index(last_block + last_block_size - 1)
-                                                    - lowest_non_clean_base_chunk_index;
-        DEBUG_ONLY(const uintptr_t last_chunk_index = addr_to_chunk_index(used.last())
-                                                      - lowest_non_clean_base_chunk_index;)
-        assert(last_chunk_index_to_check <= last_chunk_index,
-               err_msg("Out of bounds: last_chunk_index_to_check " INTPTR_FORMAT
-                       " exceeds last_chunk_index " INTPTR_FORMAT,
-                       last_chunk_index_to_check, last_chunk_index));
+        // covers. However, we need to bound this checking to the largest
+        // entry in the LNC array: this is because the heap may expand
+        // after the LNC array has been created but before we reach this point,
+        // and the last block in our chunk may have been expanded to include
+        // the expansion delta (and possibly subsequently allocated from, so
+        // it wouldn't be sufficient to check whether that last block was
+        // or was not an object at this point).
+        uintptr_t last_chunk_index_to_check = addr_to_chunk_index(last_block + last_block_size - 1)
+                                              - lowest_non_clean_base_chunk_index;
+        const uintptr_t last_chunk_index    = addr_to_chunk_index(used.last())
+                                              - lowest_non_clean_base_chunk_index;
+        if (last_chunk_index_to_check > last_chunk_index) {
+          assert(last_block + last_block_size > used.end(),
+                 err_msg("Inconsistency detected: last_block [" PTR_FORMAT "," PTR_FORMAT "]"
+                         " does not exceed used.end() = " PTR_FORMAT ","
+                         " yet last_chunk_index_to_check " INTPTR_FORMAT
+                         " exceeds last_chunk_index " INTPTR_FORMAT,
+                         last_block, last_block + last_block_size, used.end(),
+                         last_chunk_index_to_check, last_chunk_index));
+          assert(sp->used_region().end() > used.end(),
+                 err_msg("Expansion did not happen: "
+                         "[" PTR_FORMAT "," PTR_FORMAT ") -> [" PTR_FORMAT "," PTR_FORMAT ")",
+                         sp->used_region().start(), sp->used_region().end(), used.start(), used.end()));
+          NOISY(tty->print_cr(" process_chunk_boundary: heap expanded; explicitly bounding last_chunk");)
+          last_chunk_index_to_check = last_chunk_index;
+        }
         for (uintptr_t lnc_index = cur_chunk_index + 1;
              lnc_index <= last_chunk_index_to_check;
              lnc_index++) {
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -386,8 +386,6 @@
 // we rely on the size_policy object to force a bail out.
 HeapWord* ParallelScavengeHeap::mem_allocate(
                                      size_t size,
-                                     bool is_noref,
-                                     bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded) {
   assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
   assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
@@ -398,7 +396,7 @@
   // limit is being exceeded as checked below.
   *gc_overhead_limit_was_exceeded = false;
 
-  HeapWord* result = young_gen()->allocate(size, is_tlab);
+  HeapWord* result = young_gen()->allocate(size);
 
   uint loop_count = 0;
   uint gc_count = 0;
@@ -419,7 +417,7 @@
       MutexLocker ml(Heap_lock);
       gc_count = Universe::heap()->total_collections();
 
-      result = young_gen()->allocate(size, is_tlab);
+      result = young_gen()->allocate(size);
 
       // (1) If the requested object is too large to easily fit in the
       //     young_gen, or
@@ -433,21 +431,13 @@
       if (result != NULL) {
         return result;
       }
-      if (!is_tlab &&
-          size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
-        result = old_gen()->allocate(size, is_tlab);
+      if (size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
+        result = old_gen()->allocate(size);
         if (result != NULL) {
           return result;
         }
       }
       if (GC_locker::is_active_and_needs_gc()) {
-        // GC is locked out. If this is a TLAB allocation,
-        // return NULL; the requestor will retry allocation
-        // of an idividual object at a time.
-        if (is_tlab) {
-          return NULL;
-        }
-
         // If this thread is not in a jni critical section, we stall
         // the requestor until the critical section has cleared and
         // GC allowed. When the critical section clears, a GC is
@@ -472,7 +462,7 @@
     if (result == NULL) {
 
       // Generate a VM operation
-      VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count);
+      VM_ParallelGCFailedAllocation op(size, gc_count);
       VMThread::execute(&op);
 
       // Did the VM operation execute? If so, return the result directly.
@@ -526,7 +516,7 @@
     if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
         (loop_count % QueuedAllocationWarningCount == 0)) {
       warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
-              " size=%d %s", loop_count, size, is_tlab ? "(TLAB)" : "");
+              " size=%d", loop_count, size);
     }
   }
 
@@ -539,7 +529,7 @@
 // time over limit here, that is the responsibility of the heap specific
 // collection methods. This method decides where to attempt allocations,
 // and when to attempt collections, but no collection specific policy.
-HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size, bool is_tlab) {
+HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
   assert(!Universe::heap()->is_gc_active(), "not reentrant");
@@ -553,7 +543,7 @@
   // First level allocation failure, scavenge and allocate in young gen.
   GCCauseSetter gccs(this, GCCause::_allocation_failure);
   PSScavenge::invoke();
-  HeapWord* result = young_gen()->allocate(size, is_tlab);
+  HeapWord* result = young_gen()->allocate(size);
 
   // Second level allocation failure.
   //   Mark sweep and allocate in young generation.
@@ -562,28 +552,28 @@
     // Don't mark sweep twice if so.
     if (mark_sweep_invocation_count == total_invocations()) {
       invoke_full_gc(false);
-      result = young_gen()->allocate(size, is_tlab);
+      result = young_gen()->allocate(size);
     }
   }
 
   // Third level allocation failure.
   //   After mark sweep and young generation allocation failure,
   //   allocate in old generation.
-  if (result == NULL && !is_tlab) {
-    result = old_gen()->allocate(size, is_tlab);
+  if (result == NULL) {
+    result = old_gen()->allocate(size);
   }
 
   // Fourth level allocation failure. We're running out of memory.
   //   More complete mark sweep and allocate in young generation.
   if (result == NULL) {
     invoke_full_gc(true);
-    result = young_gen()->allocate(size, is_tlab);
+    result = young_gen()->allocate(size);
   }
 
   // Fifth level allocation failure.
   //   After more complete mark sweep, allocate in old generation.
-  if (result == NULL && !is_tlab) {
-    result = old_gen()->allocate(size, is_tlab);
+  if (result == NULL) {
+    result = old_gen()->allocate(size);
   }
 
   return result;
@@ -761,7 +751,7 @@
 }
 
 HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
-  return young_gen()->allocate(size, true);
+  return young_gen()->allocate(size);
 }
 
 void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
@@ -901,7 +891,7 @@
 }
 
 
-void ParallelScavengeHeap::verify(bool allow_dirty, bool silent, bool option /* ignored */) {
+void ParallelScavengeHeap::verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */) {
   // Why do we need the total_collections()-filter below?
   if (total_collections() > 0) {
     if (!silent) {
@@ -919,10 +909,6 @@
     }
     young_gen()->verify(allow_dirty);
   }
-  if (!silent) {
-    gclog_or_tty->print("ref_proc ");
-  }
-  ReferenceProcessor::verify();
 }
 
 void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
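
With the TLAB path gone, failed_mem_allocate() above falls through a fixed ladder of attempts. This is a rough, self-contained sketch of that ladder only; scavenge(), full_gc(), young_alloc() and old_alloc() are invented stubs, not the real PSScavenge/PSMarkSweep entry points, and the "mark sweep only once" check is omitted.

#include <cstddef>
#include <cstdio>
#include <cstdlib>

typedef void* HeapWordPtr;

// Trivial stand-ins so the sketch links; a real collector does the work here.
static int gc_count = 0;
static void scavenge()                      { ++gc_count; }
static void full_gc(bool /*max_compact*/)   { ++gc_count; }
static HeapWordPtr young_alloc(size_t)      { return nullptr; }  // pretend young gen stays full
static HeapWordPtr old_alloc(size_t words)  { return std::malloc(words * sizeof(void*)); }

HeapWordPtr failed_mem_allocate_sketch(size_t words) {
  scavenge();                                   // 1. young GC, retry in young
  HeapWordPtr result = young_alloc(words);
  if (result == nullptr) {                      // 2. full GC, retry in young
    full_gc(false);
    result = young_alloc(words);
  }
  if (result == nullptr) {                      // 3. fall back to the old gen
    result = old_alloc(words);
  }
  if (result == nullptr) {                      // 4. max-compaction full GC
    full_gc(true);
    result = young_alloc(words);
  }
  if (result == nullptr) {                      // 5. last try in the old gen
    result = old_alloc(words);
  }
  return result;                                // may still be nullptr
}

int main() {
  HeapWordPtr p = failed_mem_allocate_sketch(16);
  std::printf("result=%p after %d GCs\n", p, gc_count);
  std::free(p);
  return 0;
}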
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -165,12 +165,13 @@
   // an excessive amount of time is being spent doing collections
   // and caused a NULL to be returned.  If a NULL is not returned,
   // "gc_time_limit_was_exceeded" has an undefined meaning.
+  HeapWord* mem_allocate(size_t size,
+                         bool* gc_overhead_limit_was_exceeded);
 
-  HeapWord* mem_allocate(size_t size,
-                         bool is_noref,
-                         bool is_tlab,
-                         bool* gc_overhead_limit_was_exceeded);
-  HeapWord* failed_mem_allocate(size_t size, bool is_tlab);
+  // Allocation attempt(s) during a safepoint. It should never be called
+  // to allocate a new TLAB as this allocation might be satisfied out
+  // of the old generation.
+  HeapWord* failed_mem_allocate(size_t size);
 
   HeapWord* permanent_mem_allocate(size_t size);
   HeapWord* failed_permanent_mem_allocate(size_t size);
@@ -194,8 +195,6 @@
   inline void invoke_scavenge();
   inline void invoke_full_gc(bool maximum_compaction);
 
-  size_t large_typearray_limit() { return FastAllocateSizeLimit; }
-
   bool supports_inline_contig_alloc() const { return !UseNUMA; }
 
   HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
@@ -253,7 +252,7 @@
   virtual void gc_threads_do(ThreadClosure* tc) const;
   virtual void print_tracing_info() const;
 
-  void verify(bool allow_dirty, bool silent, bool /* option */);
+  void verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */);
 
   void print_heap_change(size_t prev_used);
 
--- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -80,10 +80,6 @@
       Universe::oops_do(&mark_and_push_closure);
       break;
 
-    case reference_processing:
-      ReferenceProcessor::oops_do(&mark_and_push_closure);
-      break;
-
     case jni_handles:
       JNIHandles::oops_do(&mark_and_push_closure);
       break;
--- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -98,8 +98,7 @@
     management            = 6,
     jvmti                 = 7,
     system_dictionary     = 8,
-    reference_processing  = 9,
-    code_cache            = 10
+    code_cache            = 9
   };
  private:
   RootType _root_type;
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -198,10 +198,9 @@
 
     allocate_stacks();
 
-    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
     COMPILER2_PRESENT(DerivedPointerTable::clear());
 
-    ref_processor()->enable_discovery();
+    ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
     ref_processor()->setup_policy(clear_all_softrefs);
 
     mark_sweep_phase1(clear_all_softrefs);
@@ -516,7 +515,6 @@
   {
     ParallelScavengeHeap::ParStrongRootsScope psrs;
     Universe::oops_do(mark_and_push_closure());
-    ReferenceProcessor::oops_do(mark_and_push_closure());
     JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
     CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true);
     Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
@@ -623,7 +621,6 @@
 
   // General strong roots.
   Universe::oops_do(adjust_root_pointer_closure());
-  ReferenceProcessor::oops_do(adjust_root_pointer_closure());
   JNIHandles::oops_do(adjust_root_pointer_closure());   // Global (strong) JNI handles
   Threads::oops_do(adjust_root_pointer_closure(), NULL);
   ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
--- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -182,12 +182,12 @@
 
 // Allocation. We report all successful allocations to the size policy
 // Note that the perm gen does not use this method, and should not!
-HeapWord* PSOldGen::allocate(size_t word_size, bool is_tlab) {
+HeapWord* PSOldGen::allocate(size_t word_size) {
   assert_locked_or_safepoint(Heap_lock);
-  HeapWord* res = allocate_noexpand(word_size, is_tlab);
+  HeapWord* res = allocate_noexpand(word_size);
 
   if (res == NULL) {
-    res = expand_and_allocate(word_size, is_tlab);
+    res = expand_and_allocate(word_size);
   }
 
   // Allocations in the old generation need to be reported
@@ -199,13 +199,12 @@
   return res;
 }
 
-HeapWord* PSOldGen::expand_and_allocate(size_t word_size, bool is_tlab) {
-  assert(!is_tlab, "TLAB's are not supported in PSOldGen");
+HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
   expand(word_size*HeapWordSize);
   if (GCExpandToAllocateDelayMillis > 0) {
     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
   }
-  return allocate_noexpand(word_size, is_tlab);
+  return allocate_noexpand(word_size);
 }
 
 HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
--- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -60,9 +60,8 @@
   // Used when initializing the _name field.
   static inline const char* select_name();
 
-  HeapWord* allocate_noexpand(size_t word_size, bool is_tlab) {
+  HeapWord* allocate_noexpand(size_t word_size) {
     // We assume the heap lock is held here.
-    assert(!is_tlab, "Does not support TLAB allocation");
     assert_locked_or_safepoint(Heap_lock);
     HeapWord* res = object_space()->allocate(word_size);
     if (res != NULL) {
@@ -89,7 +88,7 @@
     return (res == NULL) ? expand_and_cas_allocate(word_size) : res;
   }
 
-  HeapWord* expand_and_allocate(size_t word_size, bool is_tlab);
+  HeapWord* expand_and_allocate(size_t word_size);
   HeapWord* expand_and_cas_allocate(size_t word_size);
   void expand(size_t bytes);
   bool expand_by(size_t bytes);
@@ -164,7 +163,7 @@
 
   // Allocation. We report all successful allocations to the size policy
   // Note that the perm gen does not use this method, and should not!
-  HeapWord* allocate(size_t word_size, bool is_tlab);
+  HeapWord* allocate(size_t word_size);
 
   // Iteration.
   void oop_iterate(OopClosure* cl) { object_space()->oop_iterate(cl); }
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -2069,10 +2069,9 @@
     CodeCache::gc_prologue();
     Threads::gc_prologue();
 
-    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
     COMPILER2_PRESENT(DerivedPointerTable::clear());
 
-    ref_processor()->enable_discovery();
+    ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
     ref_processor()->setup_policy(maximum_heap_compaction);
 
     bool marked_for_unloading = false;
@@ -2445,7 +2444,6 @@
 
   // General strong roots.
   Universe::oops_do(adjust_root_pointer_closure());
-  ReferenceProcessor::oops_do(adjust_root_pointer_closure());
   JNIHandles::oops_do(adjust_root_pointer_closure());   // Global (strong) JNI handles
   Threads::oops_do(adjust_root_pointer_closure(), NULL);
   ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
--- a/src/share/vm/gc_implementation/parallelScavenge/psPermGen.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPermGen.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -46,10 +46,10 @@
 
 HeapWord* PSPermGen::allocate_permanent(size_t size) {
   assert_locked_or_safepoint(Heap_lock);
-  HeapWord* obj = allocate_noexpand(size, false);
+  HeapWord* obj = allocate_noexpand(size);
 
   if (obj == NULL) {
-    obj = expand_and_allocate(size, false);
+    obj = expand_and_allocate(size);
   }
 
   return obj;
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -350,10 +350,9 @@
     }
     save_to_space_top_before_gc();
 
-    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
     COMPILER2_PRESENT(DerivedPointerTable::clear());
 
-    reference_processor()->enable_discovery();
+    reference_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
     reference_processor()->setup_policy(false);
 
     // We track how much was promoted to the next generation for
--- a/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -55,7 +55,6 @@
   switch (_root_type) {
     case universe:
       Universe::oops_do(&roots_closure);
-      ReferenceProcessor::oops_do(&roots_closure);
       break;
 
     case jni_handles:
--- a/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -157,7 +157,7 @@
   }
 
   // Allocation
-  HeapWord* allocate(size_t word_size, bool is_tlab) {
+  HeapWord* allocate(size_t word_size) {
     HeapWord* result = eden_space()->cas_allocate(word_size);
     return result;
   }
--- a/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -33,10 +33,9 @@
 
 // The following methods are used by the parallel scavenge collector
 VM_ParallelGCFailedAllocation::VM_ParallelGCFailedAllocation(size_t size,
-  bool is_tlab, unsigned int gc_count) :
+                                                      unsigned int gc_count) :
   VM_GC_Operation(gc_count, GCCause::_allocation_failure),
   _size(size),
-  _is_tlab(is_tlab),
   _result(NULL)
 {
 }
@@ -48,7 +47,7 @@
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap");
 
   GCCauseSetter gccs(heap, _gc_cause);
-  _result = heap->failed_mem_allocate(_size, _is_tlab);
+  _result = heap->failed_mem_allocate(_size);
 
   if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
--- a/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,12 +32,10 @@
 class VM_ParallelGCFailedAllocation: public VM_GC_Operation {
  private:
   size_t    _size;
-  bool      _is_tlab;
   HeapWord* _result;
 
  public:
-  VM_ParallelGCFailedAllocation(size_t size, bool is_tlab,
-                                unsigned int gc_count);
+  VM_ParallelGCFailedAllocation(size_t size, unsigned int gc_count);
 
   virtual VMOp_Type type() const {
     return VMOp_ParallelGCFailedAllocation;
--- a/src/share/vm/gc_implementation/shared/allocationStats.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/shared/allocationStats.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -99,14 +99,16 @@
     // vulnerable to noisy glitches. In such cases, we
     // ignore the current sample and use currently available
     // historical estimates.
-    // XXX NEEDS TO BE FIXED
-    // assert(prevSweep() + splitBirths() >= splitDeaths() + (ssize_t)count, "Conservation Principle");
-    //     ^^^^^^^^^^^^^^^^^^^^^^^^^^^    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    //     "Total Stock"                  "Not used at this block size"
+    assert(prevSweep() + splitBirths() + coalBirths()        // "Total Production Stock"
+           >= splitDeaths() + coalDeaths() + (ssize_t)count, // "Current stock + depletion"
+           "Conservation Principle");
     if (inter_sweep_current > _threshold) {
-      ssize_t demand = prevSweep() - (ssize_t)count + splitBirths() - splitDeaths();
-      // XXX NEEDS TO BE FIXED
-      // assert(demand >= 0, "Demand should be non-negative");
+      ssize_t demand = prevSweep() - (ssize_t)count + splitBirths() + coalBirths()
+                       - splitDeaths() - coalDeaths();
+      assert(demand >= 0,
+             err_msg("Demand (" SSIZE_FORMAT ") should be non-negative for "
+                     PTR_FORMAT " (size=" SIZE_FORMAT ")",
+                     demand, this, count));
       // Defensive: adjust for imprecision in event counting
       if (demand < 0) {
         demand = 0;
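
For concreteness, a worked example of the re-enabled conservation assert and of the demand computation (the numbers are invented for illustration):

  prevSweep = 100, splitBirths = 30, coalBirths = 10,
  splitDeaths = 5, coalDeaths = 15, count = 90
  conservation: 100 + 30 + 10 = 140  >=  5 + 15 + 90 = 110   (assert holds)
  demand = 100 - 90 + 30 + 10 - 5 - 15 = 30 blocks of this size consumed since the last sweep
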
--- a/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -43,17 +43,6 @@
   _sts.initialize();
 };
 
-void ConcurrentGCThread::stopWorldAndDo(VoidClosure* op) {
-  MutexLockerEx x(Heap_lock,
-                  Mutex::_no_safepoint_check_flag);
-  // warning("CGC: about to try stopping world");
-  SafepointSynchronize::begin();
-  // warning("CGC: successfully stopped world");
-  op->do_void();
-  SafepointSynchronize::end();
-  // warning("CGC: successfully restarted world");
-}
-
 void ConcurrentGCThread::safepoint_synchronize() {
   _sts.suspend_all();
 }
--- a/src/share/vm/gc_implementation/shared/concurrentGCThread.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/shared/concurrentGCThread.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -95,8 +95,6 @@
   static int set_CGC_flag(int b)           { return _CGC_flag |= b; }
   static int reset_CGC_flag(int b)         { return _CGC_flag &= ~b; }
 
-  void stopWorldAndDo(VoidClosure* op);
-
   // All instances share this one set.
   static SuspendibleThreadSet _sts;
 
--- a/src/share/vm/gc_implementation/shared/generationCounters.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/shared/generationCounters.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -26,14 +26,10 @@
 #include "gc_implementation/shared/generationCounters.hpp"
 #include "memory/resourceArea.hpp"
 
-
-GenerationCounters::GenerationCounters(const char* name,
-                                       int ordinal, int spaces,
-                                       VirtualSpace* v):
-                    _virtual_space(v) {
-
+void GenerationCounters::initialize(const char* name, int ordinal, int spaces,
+                                    size_t min_capacity, size_t max_capacity,
+                                    size_t curr_capacity) {
   if (UsePerfData) {
-
     EXCEPTION_MARK;
     ResourceMark rm;
 
@@ -51,18 +47,37 @@
 
     cname = PerfDataManager::counter_name(_name_space, "minCapacity");
     PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
-                                     _virtual_space == NULL ? 0 :
-                                     _virtual_space->committed_size(), CHECK);
+                                     min_capacity, CHECK);
 
     cname = PerfDataManager::counter_name(_name_space, "maxCapacity");
     PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
-                                     _virtual_space == NULL ? 0 :
-                                     _virtual_space->reserved_size(), CHECK);
+                                     max_capacity, CHECK);
 
     cname = PerfDataManager::counter_name(_name_space, "capacity");
-    _current_size = PerfDataManager::create_variable(SUN_GC, cname,
-                                     PerfData::U_Bytes,
-                                     _virtual_space == NULL ? 0 :
-                                     _virtual_space->committed_size(), CHECK);
+    _current_size =
+      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
+                                       curr_capacity, CHECK);
   }
 }
+
+GenerationCounters::GenerationCounters(const char* name,
+                                       int ordinal, int spaces,
+                                       VirtualSpace* v)
+  : _virtual_space(v) {
+  assert(v != NULL, "don't call this constructor if v == NULL");
+  initialize(name, ordinal, spaces,
+             v->committed_size(), v->reserved_size(), v->committed_size());
+}
+
+GenerationCounters::GenerationCounters(const char* name,
+                                       int ordinal, int spaces,
+                                       size_t min_capacity, size_t max_capacity,
+                                       size_t curr_capacity)
+  : _virtual_space(NULL) {
+  initialize(name, ordinal, spaces, min_capacity, max_capacity, curr_capacity);
+}
+
+void GenerationCounters::update_all() {
+  assert(_virtual_space != NULL, "otherwise, override this method");
+  _current_size->set_value(_virtual_space->committed_size());
+}
--- a/src/share/vm/gc_implementation/shared/generationCounters.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_implementation/shared/generationCounters.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -34,6 +34,11 @@
 class GenerationCounters: public CHeapObj {
   friend class VMStructs;
 
+private:
+  void initialize(const char* name, int ordinal, int spaces,
+                  size_t min_capacity, size_t max_capacity,
+                  size_t curr_capacity);
+
  protected:
   PerfVariable*      _current_size;
   VirtualSpace*      _virtual_space;
@@ -48,11 +53,18 @@
   char*              _name_space;
 
   // This constructor is only meant for use with the PSGenerationCounters
-  // constructor.  The need for such an constructor should be eliminated
+  // constructor. The need for such a constructor should be eliminated
   // when VirtualSpace and PSVirtualSpace are unified.
-  GenerationCounters() : _name_space(NULL), _current_size(NULL), _virtual_space(NULL) {}
+  GenerationCounters()
+             : _name_space(NULL), _current_size(NULL), _virtual_space(NULL) {}
+
+  // This constructor is used for subclasses that do not have a space
+  // associated with them (e.g., in G1).
+  GenerationCounters(const char* name, int ordinal, int spaces,
+                     size_t min_capacity, size_t max_capacity,
+                     size_t curr_capacity);
+
  public:
-
   GenerationCounters(const char* name, int ordinal, int spaces,
                      VirtualSpace* v);
 
@@ -60,10 +72,7 @@
     if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
   }
 
-  virtual void update_all() {
-    _current_size->set_value(_virtual_space == NULL ? 0 :
-                             _virtual_space->committed_size());
-  }
+  virtual void update_all();
 
   const char* name_space() const        { return _name_space; }
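
A hypothetical subclass using the new size-based constructor (class name and fields below are illustrative only; the real G1 counters differ): a generation without a VirtualSpace supplies explicit capacities and must override update_all(), since the base implementation now asserts that _virtual_space is non-NULL.

class ToyHeapCounters : public GenerationCounters {
  size_t _committed_bytes;           // maintained by the collector itself
 public:
  ToyHeapCounters(size_t min_capacity, size_t max_capacity, size_t curr_capacity)
    : GenerationCounters("toy", 1 /* ordinal */, 1 /* spaces */,
                         min_capacity, max_capacity, curr_capacity),
      _committed_bytes(curr_capacity) {}

  void set_committed(size_t bytes) { _committed_bytes = bytes; }

  virtual void update_all() {
    _current_size->set_value(_committed_bytes);  // never touches _virtual_space
  }
};
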
 
--- a/src/share/vm/gc_interface/collectedHeap.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_interface/collectedHeap.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -157,8 +157,14 @@
     // ..and clear it.
     Copy::zero_to_words(obj, new_tlab_size);
   } else {
-    // ...and clear just the allocated object.
-    Copy::zero_to_words(obj, size);
+    // ...and zap just allocated object.
+#ifdef ASSERT
+    // Skip mangling the space corresponding to the object header to
+    // ensure that the returned space is not considered parsable by
+    // any concurrent GC thread.
+    size_t hdr_size = oopDesc::header_size();
+    Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
+#endif // ASSERT
   }
   thread->tlab().fill(obj, obj + size, new_tlab_size);
   return obj;
@@ -404,13 +410,13 @@
 
 void CollectedHeap::pre_full_gc_dump() {
   if (HeapDumpBeforeFullGC) {
-    TraceTime tt("Heap Dump: ", PrintGCDetails, false, gclog_or_tty);
+    TraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, gclog_or_tty);
     // We are doing a "major" collection and a heap dump before
     // major collection has been requested.
     HeapDumper::dump_heap();
   }
   if (PrintClassHistogramBeforeFullGC) {
-    TraceTime tt("Class Histogram: ", PrintGCDetails, true, gclog_or_tty);
+    TraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, gclog_or_tty);
     VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
     inspector.doit();
   }
@@ -418,11 +424,11 @@
 
 void CollectedHeap::post_full_gc_dump() {
   if (HeapDumpAfterFullGC) {
-    TraceTime tt("Heap Dump", PrintGCDetails, false, gclog_or_tty);
+    TraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, gclog_or_tty);
     HeapDumper::dump_heap();
   }
   if (PrintClassHistogramAfterFullGC) {
-    TraceTime tt("Class Histogram", PrintGCDetails, true, gclog_or_tty);
+    TraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, gclog_or_tty);
     VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
     inspector.doit();
   }
--- a/src/share/vm/gc_interface/collectedHeap.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_interface/collectedHeap.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -93,7 +93,7 @@
   // pure virtual.
   void pre_initialize();
 
-  // Create a new tlab
+  // Create a new tlab. All TLAB allocations must go through this.
   virtual HeapWord* allocate_new_tlab(size_t size);
 
   // Accumulate statistics on all tlabs.
@@ -109,11 +109,11 @@
 
   // Allocate an uninitialized block of the given size, or returns NULL if
   // this is impossible.
-  inline static HeapWord* common_mem_allocate_noinit(size_t size, bool is_noref, TRAPS);
+  inline static HeapWord* common_mem_allocate_noinit(size_t size, TRAPS);
 
   // Like allocate_init, but the block returned by a successful allocation
   // is guaranteed initialized to zeros.
-  inline static HeapWord* common_mem_allocate_init(size_t size, bool is_noref, TRAPS);
+  inline static HeapWord* common_mem_allocate_init(size_t size, TRAPS);
 
   // Same as common_mem version, except memory is allocated in the permanent area
   // If there is no permanent area, revert to common_mem_allocate_noinit
@@ -322,7 +322,6 @@
   // General obj/array allocation facilities.
   inline static oop obj_allocate(KlassHandle klass, int size, TRAPS);
   inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS);
-  inline static oop large_typearray_allocate(KlassHandle klass, int size, int length, TRAPS);
 
   // Special obj/array allocation facilities.
   // Some heaps may want to manage "permanent" data uniquely. These default
@@ -345,16 +344,12 @@
   // Raw memory allocation facilities
   // The obj and array allocate methods are covers for these methods.
   // The permanent allocation method should default to mem_allocate if
-  // permanent memory isn't supported.
+  // permanent memory isn't supported. mem_allocate() should never be
+  // called to allocate TLABs, only individual objects.
   virtual HeapWord* mem_allocate(size_t size,
-                                 bool is_noref,
-                                 bool is_tlab,
                                  bool* gc_overhead_limit_was_exceeded) = 0;
   virtual HeapWord* permanent_mem_allocate(size_t size) = 0;
 
-  // The boundary between a "large" and "small" array of primitives, in words.
-  virtual size_t large_typearray_limit() = 0;
-
   // Utilities for turning raw memory into filler objects.
   //
   // min_fill_size() is the smallest region that can be filled.
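
To illustrate the narrowed contract (a hypothetical subclass, not part of this change): TLAB refills now arrive only via allocate_new_tlab(), so mem_allocate() can assume it is handing out individual objects. ToyHeap and its helpers are made up for the sketch.

HeapWord* ToyHeap::mem_allocate(size_t size,
                                bool* gc_overhead_limit_was_exceeded) {
  *gc_overhead_limit_was_exceeded = false;
  HeapWord* obj = cas_allocate(size);               // lock-free fast path
  if (obj == NULL) {
    obj = collect_and_allocate(size, gc_overhead_limit_was_exceeded);
  }
  return obj;                                       // a single object, never a TLAB
}

HeapWord* ToyHeap::allocate_new_tlab(size_t size) {
  return cas_allocate(size);                        // TLABs are refilled here instead
}
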
@@ -606,7 +601,7 @@
   virtual void print_tracing_info() const = 0;
 
   // Heap verification
-  virtual void verify(bool allow_dirty, bool silent, bool option) = 0;
+  virtual void verify(bool allow_dirty, bool silent, VerifyOption option) = 0;
 
   // Non product verification and debugging.
 #ifndef PRODUCT
--- a/src/share/vm/gc_interface/collectedHeap.inline.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/gc_interface/collectedHeap.inline.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -122,7 +122,7 @@
   post_allocation_notify(klass, (oop)obj);
 }
 
-HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, bool is_noref, TRAPS) {
+HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, TRAPS) {
 
   // Clear unhandled oops for memory allocation.  Memory allocation might
   // not take out a lock if from tlab, so clear here.
@@ -133,7 +133,6 @@
     return NULL;  // caller does a CHECK_0 too
   }
 
-  // We may want to update this, is_noref objects might not be allocated in TLABs.
   HeapWord* result = NULL;
   if (UseTLAB) {
     result = CollectedHeap::allocate_from_tlab(THREAD, size);
@@ -145,8 +144,6 @@
   }
   bool gc_overhead_limit_was_exceeded = false;
   result = Universe::heap()->mem_allocate(size,
-                                          is_noref,
-                                          false,
                                           &gc_overhead_limit_was_exceeded);
   if (result != NULL) {
     NOT_PRODUCT(Universe::heap()->
@@ -183,8 +180,8 @@
   }
 }
 
-HeapWord* CollectedHeap::common_mem_allocate_init(size_t size, bool is_noref, TRAPS) {
-  HeapWord* obj = common_mem_allocate_noinit(size, is_noref, CHECK_NULL);
+HeapWord* CollectedHeap::common_mem_allocate_init(size_t size, TRAPS) {
+  HeapWord* obj = common_mem_allocate_noinit(size, CHECK_NULL);
   init_obj(obj, size);
   return obj;
 }
@@ -255,7 +252,7 @@
   debug_only(check_for_valid_allocation_state());
   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
   assert(size >= 0, "int won't convert to size_t");
-  HeapWord* obj = common_mem_allocate_init(size, false, CHECK_NULL);
+  HeapWord* obj = common_mem_allocate_init(size, CHECK_NULL);
   post_allocation_setup_obj(klass, obj, size);
   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
   return (oop)obj;
@@ -268,20 +265,7 @@
   debug_only(check_for_valid_allocation_state());
   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
   assert(size >= 0, "int won't convert to size_t");
-  HeapWord* obj = common_mem_allocate_init(size, false, CHECK_NULL);
-  post_allocation_setup_array(klass, obj, size, length);
-  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
-  return (oop)obj;
-}
-
-oop CollectedHeap::large_typearray_allocate(KlassHandle klass,
-                                            int size,
-                                            int length,
-                                            TRAPS) {
-  debug_only(check_for_valid_allocation_state());
-  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
-  assert(size >= 0, "int won't convert to size_t");
-  HeapWord* obj = common_mem_allocate_init(size, true, CHECK_NULL);
+  HeapWord* obj = common_mem_allocate_init(size, CHECK_NULL);
   post_allocation_setup_array(klass, obj, size, length);
   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
   return (oop)obj;
@@ -303,7 +287,10 @@
   assert(size >= 0, "int won't convert to size_t");
   HeapWord* obj = common_permanent_mem_allocate_init(size, CHECK_NULL);
   post_allocation_setup_no_klass_install(klass, obj, size);
-  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
+#ifndef PRODUCT
+  const size_t hs = oopDesc::header_size();
+  Universe::heap()->check_for_bad_heap_word_value(obj+hs, size-hs);
+#endif
   return (oop)obj;
 }
 
--- a/src/share/vm/interpreter/bytecodes.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/interpreter/bytecodes.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -419,6 +419,8 @@
 
   static bool        is_zero_const  (Code code)    { return (code == _aconst_null || code == _iconst_0
                                                            || code == _fconst_0 || code == _dconst_0); }
+  static bool        is_invoke      (Code code)    { return (_invokevirtual <= code && code <= _invokedynamic); }
+
   static int         compute_flags  (const char* format, int more_flags = 0);  // compute the flags
   static int         flags          (int code, bool is_wide) {
     assert(code == (u_char)code, "must be a byte");
--- a/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/interpreter/interpreterRuntime.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -509,6 +509,7 @@
   // resolve field
   FieldAccessInfo info;
   constantPoolHandle pool(thread, method(thread)->constants());
+  bool is_put    = (bytecode == Bytecodes::_putfield  || bytecode == Bytecodes::_putstatic);
   bool is_static = (bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic);
 
   {
@@ -528,8 +529,6 @@
   // exceptions at the correct place. If we do not resolve completely
   // in the current pass, leaving the put_code set to zero will
   // cause the next put instruction to reresolve.
-  bool is_put = (bytecode == Bytecodes::_putfield ||
-                 bytecode == Bytecodes::_putstatic);
   Bytecodes::Code put_code = (Bytecodes::Code)0;
 
   // We also need to delay resolving getstatic instructions until the
@@ -541,7 +540,6 @@
                                !klass->is_initialized());
   Bytecodes::Code get_code = (Bytecodes::Code)0;
 
-
   if (!uninitialized_static) {
     get_code = ((is_static) ? Bytecodes::_getstatic : Bytecodes::_getfield);
     if (is_put || !info.access_flags().is_final()) {
@@ -549,6 +547,23 @@
     }
   }
 
+  if (is_put && !is_static && klass->is_subclass_of(SystemDictionary::CallSite_klass()) && (info.name() == vmSymbols::target_name())) {
+    const jint direction = frame::interpreter_frame_expression_stack_direction();
+    oop call_site     = *((oop*) thread->last_frame().interpreter_frame_tos_at(-1 * direction));
+    oop method_handle = *((oop*) thread->last_frame().interpreter_frame_tos_at( 0 * direction));
+    assert(call_site    ->is_a(SystemDictionary::CallSite_klass()),     "must be");
+    assert(method_handle->is_a(SystemDictionary::MethodHandle_klass()), "must be");
+
+    {
+      // Walk all nmethods depending on this call site.
+      MutexLocker mu(Compile_lock, thread);
+      Universe::flush_dependents_on(call_site, method_handle);
+    }
+
+    // Don't allow fast path for setting CallSite.target and sub-classes.
+    put_code = (Bytecodes::Code) 0;
+  }
+
   cache_entry(thread)->set_field(
     get_code,
     put_code,
@@ -844,7 +859,7 @@
   const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : InvocationEntryBci;
   const int bci = branch_bcp != NULL ? method->bci_from(fr.interpreter_frame_bcp()) : InvocationEntryBci;
 
-  nmethod* osr_nm = CompilationPolicy::policy()->event(method, method, branch_bci, bci, CompLevel_none, thread);
+  nmethod* osr_nm = CompilationPolicy::policy()->event(method, method, branch_bci, bci, CompLevel_none, NULL, thread);
 
   if (osr_nm != NULL) {
     // We may need to do on-stack replacement which requires that no
@@ -969,11 +984,8 @@
   // check the access_flags for the field in the klass
 
   instanceKlass* ik = instanceKlass::cast(java_lang_Class::as_klassOop(cp_entry->f1()));
-  typeArrayOop fields = ik->fields();
   int index = cp_entry->field_index();
-  assert(index < fields->length(), "holders field index is out of range");
-  // bail out if field accesses are not watched
-  if ((fields->ushort_at(index) & JVM_ACC_FIELD_ACCESS_WATCHED) == 0) return;
+  if ((ik->field_access_flags(index) & JVM_ACC_FIELD_ACCESS_WATCHED) == 0) return;
 
   switch(cp_entry->flag_state()) {
     case btos:    // fall through
@@ -1006,11 +1018,9 @@
 
   // check the access_flags for the field in the klass
   instanceKlass* ik = instanceKlass::cast(k);
-  typeArrayOop fields = ik->fields();
   int index = cp_entry->field_index();
-  assert(index < fields->length(), "holders field index is out of range");
   // bail out if field modifications are not watched
-  if ((fields->ushort_at(index) & JVM_ACC_FIELD_MODIFICATION_WATCHED) == 0) return;
+  if ((ik->field_access_flags(index) & JVM_ACC_FIELD_MODIFICATION_WATCHED) == 0) return;
 
   char sig_type = '\0';
 
@@ -1229,7 +1239,7 @@
   // preparing the same method will be sure to see non-null entry & mirror.
 IRT_END
 
-#if defined(IA32) || defined(AMD64)
+#if defined(IA32) || defined(AMD64) || defined(ARM)
 IRT_LEAF(void, InterpreterRuntime::popframe_move_outgoing_args(JavaThread* thread, void* src_address, void* dest_address))
   if (src_address == dest_address) {
     return;
--- a/src/share/vm/interpreter/interpreterRuntime.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/interpreter/interpreterRuntime.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -141,8 +141,8 @@
                                         methodOopDesc* method,
                                         intptr_t* from, intptr_t* to);
 
-#if defined(IA32) || defined(AMD64)
-  // Popframe support (only needed on x86 and AMD64)
+#if defined(IA32) || defined(AMD64) || defined(ARM)
+  // Popframe support (only needed on x86, AMD64 and ARM)
   static void popframe_move_outgoing_args(JavaThread* thread, void* src_address, void* dest_address);
 #endif
 
--- a/src/share/vm/interpreter/linkResolver.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/interpreter/linkResolver.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -132,7 +132,7 @@
       return;
     }
     CompileBroker::compile_method(selected_method, InvocationEntryBci,
-                                  CompLevel_initial_compile,
+                                  CompilationPolicy::policy()->initial_compile_level(),
                                   methodHandle(), 0, "must_be_compiled", CHECK);
   }
 }
--- a/src/share/vm/interpreter/templateTable.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/interpreter/templateTable.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -120,8 +120,8 @@
 
   // helpers
   static void unimplemented_bc();
-  static void patch_bytecode(Bytecodes::Code bc, Register scratch1,
-                             Register scratch2, bool load_bc_in_scratch = true);
+  static void patch_bytecode(Bytecodes::Code bc, Register bc_reg,
+                             Register temp_reg, bool load_bc_into_bc_reg = true, int byte_no = -1);
 
   // C calls
   static void call_VM(Register oop_result, address entry_point);
--- a/src/share/vm/memory/allocation.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/memory/allocation.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -151,6 +151,8 @@
 //------------------------------Chunk------------------------------------------
 // Linked list of raw memory chunks
 class Chunk: public CHeapObj {
+  friend class VMStructs;
+
  protected:
   Chunk*       _next;     // Next Chunk in list
   const size_t _len;      // Size of this Chunk
@@ -200,6 +202,8 @@
   friend class ResourceMark;
   friend class HandleMark;
   friend class NoHandleMark;
+  friend class VMStructs;
+
   Chunk *_first;                // First chunk
   Chunk *_chunk;                // current chunk
   char *_hwm, *_max;            // High water mark and max in current chunk
--- a/src/share/vm/memory/collectorPolicy.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/memory/collectorPolicy.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -750,10 +750,6 @@
   return NULL;
 }
 
-size_t GenCollectorPolicy::large_typearray_limit() {
-  return FastAllocateSizeLimit;
-}
-
 // Return true if any of the following is true:
 // . the allocation won't fit into the current young gen heap
 // . gc locker is occupied (jni critical section)
--- a/src/share/vm/memory/collectorPolicy.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/memory/collectorPolicy.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -280,9 +280,6 @@
 
   HeapWord *satisfy_failed_allocation(size_t size, bool is_tlab);
 
-  // The size that defines a "large array".
-  virtual size_t large_typearray_limit();
-
   // Adaptive size policy
   virtual void initialize_size_policy(size_t init_eden_size,
                                       size_t init_promo_size,
--- a/src/share/vm/memory/defNewGeneration.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/memory/defNewGeneration.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -905,6 +905,10 @@
     to()->check_mangled_unused_area_complete();
   }
 
+  if (!CleanChunkPoolAsync) {
+    Chunk::clean_chunk_pool();
+  }
+
   // update the generation and space performance counters
   update_counters();
   gch->collector_policy()->counters()->update_counters();
--- a/src/share/vm/memory/genCollectedHeap.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -434,11 +434,9 @@
 }
 
 HeapWord* GenCollectedHeap::mem_allocate(size_t size,
-                                         bool is_large_noref,
-                                         bool is_tlab,
                                          bool* gc_overhead_limit_was_exceeded) {
   return collector_policy()->mem_allocate_work(size,
-                                               is_tlab,
+                                               false /* is_tlab */,
                                                gc_overhead_limit_was_exceeded);
 }
 
@@ -601,8 +599,7 @@
           // atomic wrt other collectors in this configuration, we
           // are guaranteed to have empty discovered ref lists.
           if (rp->discovery_is_atomic()) {
-            rp->verify_no_references_recorded();
-            rp->enable_discovery();
+            rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
             rp->setup_policy(do_clear_all_soft_refs);
           } else {
             // collect() below will enable discovery as appropriate
@@ -1120,11 +1117,9 @@
 
 HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
   bool gc_overhead_limit_was_exceeded;
-  HeapWord* result = mem_allocate(size   /* size */,
-                                  false  /* is_large_noref */,
-                                  true   /* is_tlab */,
-                                  &gc_overhead_limit_was_exceeded);
-  return result;
+  return collector_policy()->mem_allocate_work(size /* size */,
+                                               true /* is_tlab */,
+                                               &gc_overhead_limit_was_exceeded);
 }
 
 // Requires "*prev_ptr" to be non-NULL.  Deletes and a block of minimal size
@@ -1179,10 +1174,6 @@
   }
 }
 
-size_t GenCollectedHeap::large_typearray_limit() {
-  return gen_policy()->large_typearray_limit();
-}
-
 class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
   void do_generation(Generation* gen) {
     gen->prepare_for_verify();
@@ -1260,7 +1251,7 @@
   return _gens[level]->gc_stats();
 }
 
-void GenCollectedHeap::verify(bool allow_dirty, bool silent, bool option /* ignored */) {
+void GenCollectedHeap::verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */) {
   if (!silent) {
     gclog_or_tty->print("permgen ");
   }
@@ -1277,10 +1268,6 @@
     gclog_or_tty->print("remset ");
   }
   rem_set()->verify();
-  if (!silent) {
-     gclog_or_tty->print("ref_proc ");
-  }
-  ReferenceProcessor::verify();
 }
 
 void GenCollectedHeap::print() const { print_on(tty); }
@@ -1392,6 +1379,10 @@
   generation_iterate(&blk, false);  // not old-to-young.
   perm_gen()->gc_epilogue(full);
 
+  if (!CleanChunkPoolAsync) {
+    Chunk::clean_chunk_pool();
+  }
+
   always_do_update_barrier = UseConcMarkSweepGC;
 };
 
--- a/src/share/vm/memory/genCollectedHeap.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/memory/genCollectedHeap.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -161,8 +161,6 @@
   size_t max_capacity() const;
 
   HeapWord* mem_allocate(size_t size,
-                         bool   is_large_noref,
-                         bool   is_tlab,
                          bool*  gc_overhead_limit_was_exceeded);
 
   // We may support a shared contiguous allocation area, if the youngest
@@ -315,8 +313,6 @@
   // contributed as it needs.
   void release_scratch();
 
-  size_t large_typearray_limit();
-
   // Ensure parsability: override
   virtual void ensure_parsability(bool retire_tlabs);
 
@@ -361,7 +357,7 @@
   void prepare_for_verify();
 
   // Override.
-  void verify(bool allow_dirty, bool silent, bool /* option */);
+  void verify(bool allow_dirty, bool silent, VerifyOption option);
 
   // Override.
   void print() const;
--- a/src/share/vm/memory/referenceProcessor.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/memory/referenceProcessor.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -35,62 +35,16 @@
 
 ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
 ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
-oop              ReferenceProcessor::_sentinelRef = NULL;
-const int        subclasses_of_ref                = REF_PHANTOM - REF_OTHER;
-
-// List of discovered references.
-class DiscoveredList {
-public:
-  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
-  oop head() const     {
-     return UseCompressedOops ?  oopDesc::decode_heap_oop_not_null(_compressed_head) :
-                                _oop_head;
-  }
-  HeapWord* adr_head() {
-    return UseCompressedOops ? (HeapWord*)&_compressed_head :
-                               (HeapWord*)&_oop_head;
-  }
-  void   set_head(oop o) {
-    if (UseCompressedOops) {
-      // Must compress the head ptr.
-      _compressed_head = oopDesc::encode_heap_oop_not_null(o);
-    } else {
-      _oop_head = o;
-    }
-  }
-  bool   empty() const          { return head() == ReferenceProcessor::sentinel_ref(); }
-  size_t length()               { return _len; }
-  void   set_length(size_t len) { _len = len;  }
-  void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
-  void   dec_length(size_t dec) { _len -= dec; }
-private:
-  // Set value depending on UseCompressedOops. This could be a template class
-  // but then we have to fix all the instantiations and declarations that use this class.
-  oop       _oop_head;
-  narrowOop _compressed_head;
-  size_t _len;
-};
+bool             ReferenceProcessor::_pending_list_uses_discovered_field = false;
 
 void referenceProcessor_init() {
   ReferenceProcessor::init_statics();
 }
 
 void ReferenceProcessor::init_statics() {
-  assert(_sentinelRef == NULL, "should be initialized precisely once");
-  EXCEPTION_MARK;
-  _sentinelRef = instanceKlass::cast(
-                    SystemDictionary::Reference_klass())->
-                      allocate_permanent_instance(THREAD);
-
   // Initialize the master soft ref clock.
   java_lang_ref_SoftReference::set_clock(os::javaTimeMillis());
 
-  if (HAS_PENDING_EXCEPTION) {
-      Handle ex(THREAD, PENDING_EXCEPTION);
-      vm_exit_during_initialization(ex);
-  }
-  assert(_sentinelRef != NULL && _sentinelRef->is_oop(),
-         "Just constructed it!");
   _always_clear_soft_ref_policy = new AlwaysClearPolicy();
   _default_soft_ref_policy      = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
                                       NOT_COMPILER2(LRUCurrentHeapPolicy());
@@ -100,6 +54,7 @@
   guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
             RefDiscoveryPolicy == ReferentBasedDiscovery,
             "Unrecongnized RefDiscoveryPolicy");
+  _pending_list_uses_discovered_field = JDK_Version::current().pending_list_uses_discovered_field();
 }
 
 ReferenceProcessor::ReferenceProcessor(MemRegion span,
@@ -123,20 +78,20 @@
   _discovery_is_mt     = mt_discovery;
   _num_q               = MAX2(1, mt_processing_degree);
   _max_num_q           = MAX2(_num_q, mt_discovery_degree);
-  _discoveredSoftRefs  = NEW_C_HEAP_ARRAY(DiscoveredList, _max_num_q * subclasses_of_ref);
+  _discoveredSoftRefs  = NEW_C_HEAP_ARRAY(DiscoveredList,
+                                          _max_num_q * number_of_subclasses_of_ref());
   if (_discoveredSoftRefs == NULL) {
     vm_exit_during_initialization("Could not allocated RefProc Array");
   }
   _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_q];
   _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
   _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
-  assert(sentinel_ref() != NULL, "_sentinelRef is NULL");
-  // Initialized all entries to _sentinelRef
-  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
-        _discoveredSoftRefs[i].set_head(sentinel_ref());
+  // Initialize all entries to NULL
+  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
+    _discoveredSoftRefs[i].set_head(NULL);
     _discoveredSoftRefs[i].set_length(0);
   }
-  // If we do barreirs, cache a copy of the barrier set.
+  // If we do barriers, cache a copy of the barrier set.
   if (discovered_list_needs_barrier) {
     _bs = Universe::heap()->barrier_set();
   }
@@ -146,19 +101,15 @@
 #ifndef PRODUCT
 void ReferenceProcessor::verify_no_references_recorded() {
   guarantee(!_discovering_refs, "Discovering refs?");
-  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
-    guarantee(_discoveredSoftRefs[i].empty(),
+  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
+    guarantee(_discoveredSoftRefs[i].is_empty(),
               "Found non-empty discovered list");
   }
 }
 #endif
 
 void ReferenceProcessor::weak_oops_do(OopClosure* f) {
-  // Should this instead be
-  // for (int i = 0; i < subclasses_of_ref; i++_ {
-  //   for (int j = 0; j < _num_q; j++) {
-  //     int index = i * _max_num_q + j;
-  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
+  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
     if (UseCompressedOops) {
       f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
     } else {
@@ -167,10 +118,6 @@
   }
 }
 
-void ReferenceProcessor::oops_do(OopClosure* f) {
-  f->do_oop(adr_sentinel_ref());
-}
-
 void ReferenceProcessor::update_soft_ref_master_clock() {
   // Update (advance) the soft ref master clock field. This must be done
   // after processing the soft ref list.
@@ -283,8 +230,6 @@
   }
 #endif
   JNIHandles::weak_oops_do(is_alive, keep_alive);
-  // Finally remember to keep sentinel around
-  keep_alive->do_oop(adr_sentinel_ref());
   complete_gc->do_void();
 }
 
@@ -327,46 +272,77 @@
 void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
                                                     HeapWord* pending_list_addr) {
   // Given a list of refs linked through the "discovered" field
-  // (java.lang.ref.Reference.discovered) chain them through the
-  // "next" field (java.lang.ref.Reference.next) and prepend
-  // to the pending list.
+  // (java.lang.ref.Reference.discovered), self-loop their "next" field
+  // thus distinguishing them from active References, then
+  // prepend them to the pending list.
+  // BKWRD COMPATIBILITY NOTE: For older JDKs (prior to the fix for 4956777),
+  // the "next" field is used to chain the pending list, not the discovered
+  // field.
+
   if (TraceReferenceGC && PrintGCDetails) {
     gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
                            INTPTR_FORMAT, (address)refs_list.head());
   }
-  oop obj = refs_list.head();
-  // Walk down the list, copying the discovered field into
-  // the next field and clearing it (except for the last
-  // non-sentinel object which is treated specially to avoid
-  // confusion with an active reference).
-  while (obj != sentinel_ref()) {
-    assert(obj->is_instanceRef(), "should be reference object");
-    oop next = java_lang_ref_Reference::discovered(obj);
-    if (TraceReferenceGC && PrintGCDetails) {
-      gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next " INTPTR_FORMAT,
-                             obj, next);
+
+  oop obj = NULL;
+  oop next_d = refs_list.head();
+  if (pending_list_uses_discovered_field()) { // New behaviour
+    // Walk down the list, self-looping the next field
+    // so that the References are not considered active.
+    while (obj != next_d) {
+      obj = next_d;
+      assert(obj->is_instanceRef(), "should be reference object");
+      next_d = java_lang_ref_Reference::discovered(obj);
+      if (TraceReferenceGC && PrintGCDetails) {
+        gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
+                               obj, next_d);
+      }
+      assert(java_lang_ref_Reference::next(obj) == NULL,
+             "Reference not active; should not be discovered");
+      // Self-loop next, so as to make Ref not active.
+      java_lang_ref_Reference::set_next(obj, obj);
+      if (next_d == obj) {  // obj is last
+        // Swap refs_list into pending_list_addr and
+        // set obj's discovered to what we read from pending_list_addr.
+        oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
+        // Need oop_check on pending_list_addr above;
+        // see special oop-check code at the end of
+        // enqueue_discovered_reflists() further below.
+        java_lang_ref_Reference::set_discovered(obj, old); // old may be NULL
+      }
     }
-    assert(java_lang_ref_Reference::next(obj) == NULL,
-           "The reference should not be enqueued");
-    if (next == sentinel_ref()) {  // obj is last
-      // Swap refs_list into pendling_list_addr and
-      // set obj's next to what we read from pending_list_addr.
-      oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
-      // Need oop_check on pending_list_addr above;
-      // see special oop-check code at the end of
-      // enqueue_discovered_reflists() further below.
-      if (old == NULL) {
-        // obj should be made to point to itself, since
-        // pending list was empty.
-        java_lang_ref_Reference::set_next(obj, obj);
+  } else { // Old behaviour
+    // Walk down the list, copying the discovered field into
+    // the next field and clearing the discovered field.
+    while (obj != next_d) {
+      obj = next_d;
+      assert(obj->is_instanceRef(), "should be reference object");
+      next_d = java_lang_ref_Reference::discovered(obj);
+      if (TraceReferenceGC && PrintGCDetails) {
+        gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
+                               obj, next_d);
+      }
+      assert(java_lang_ref_Reference::next(obj) == NULL,
+             "The reference should not be enqueued");
+      if (next_d == obj) {  // obj is last
+        // Swap refs_list into pending_list_addr and
+        // set obj's next to what we read from pending_list_addr.
+        oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
+        // Need oop_check on pending_list_addr above;
+        // see special oop-check code at the end of
+        // enqueue_discovered_reflists() further below.
+        if (old == NULL) {
+          // obj should be made to point to itself, since
+          // pending list was empty.
+          java_lang_ref_Reference::set_next(obj, obj);
+        } else {
+          java_lang_ref_Reference::set_next(obj, old);
+        }
       } else {
-        java_lang_ref_Reference::set_next(obj, old);
+        java_lang_ref_Reference::set_next(obj, next_d);
       }
-    } else {
-      java_lang_ref_Reference::set_next(obj, next);
+      java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
     }
-    java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
-    obj = next;
   }
 }
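
The comment at the top of this function describes the new chaining scheme; the following self-contained toy (plain pointers instead of oops, and no atomic exchange on the pending-list head) illustrates the "new behaviour" branch:

#include <cstdio>

struct Ref {                 // stand-ins for java.lang.ref.Reference fields
  Ref* next;                 // self-loop  => the Reference is no longer active
  Ref* discovered;           // self-loop  => last element of a discovered list
};

// 'head' is a non-empty discovered list; 'pending' is the old pending-list head
// (may be NULL). Returns the new pending-list head.
Ref* enqueue_discovered(Ref* head, Ref* pending) {
  Ref* obj = NULL;
  Ref* next_d = head;
  while (obj != next_d) {
    obj = next_d;
    next_d = obj->discovered;
    obj->next = obj;              // self-loop "next": not active any more
    if (next_d == obj) {          // reached the last element
      obj->discovered = pending;  // splice the old pending list behind it
    }
  }
  return head;
}

int main() {
  Ref b = { NULL, NULL }; b.discovered = &b;       // one-element tail
  Ref a = { NULL, &b };                            // discovered list: a -> b
  Ref* pending = enqueue_discovered(&a, NULL);
  std::printf("%d %d %d\n", pending == &a, a.next == &a, b.discovered == NULL);
  return 0;
}
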
 
@@ -376,10 +352,9 @@
   RefProcEnqueueTask(ReferenceProcessor& ref_processor,
                      DiscoveredList      discovered_refs[],
                      HeapWord*           pending_list_addr,
-                     oop                 sentinel_ref,
                      int                 n_queues)
     : EnqueueTask(ref_processor, discovered_refs,
-                  pending_list_addr, sentinel_ref, n_queues)
+                  pending_list_addr, n_queues)
   { }
 
   virtual void work(unsigned int work_id) {
@@ -392,11 +367,11 @@
     // allocated and are indexed into.
     assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
     for (int j = 0;
-         j < subclasses_of_ref;
+         j < ReferenceProcessor::number_of_subclasses_of_ref();
          j++, index += _n_queues) {
       _ref_processor.enqueue_discovered_reflist(
         _refs_lists[index], _pending_list_addr);
-      _refs_lists[index].set_head(_sentinel_ref);
+      _refs_lists[index].set_head(NULL);
       _refs_lists[index].set_length(0);
     }
   }
@@ -408,125 +383,19 @@
   if (_processing_is_mt && task_executor != NULL) {
     // Parallel code
     RefProcEnqueueTask tsk(*this, _discoveredSoftRefs,
-                           pending_list_addr, sentinel_ref(), _max_num_q);
+                           pending_list_addr, _max_num_q);
     task_executor->execute(tsk);
   } else {
     // Serial code: call the parent class's implementation
-    for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
+    for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
       enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
-      _discoveredSoftRefs[i].set_head(sentinel_ref());
+      _discoveredSoftRefs[i].set_head(NULL);
       _discoveredSoftRefs[i].set_length(0);
     }
   }
 }
 
-// Iterator for the list of discovered references.
-class DiscoveredListIterator {
-public:
-  inline DiscoveredListIterator(DiscoveredList&    refs_list,
-                                OopClosure*        keep_alive,
-                                BoolObjectClosure* is_alive);
-
-  // End Of List.
-  inline bool has_next() const { return _next != ReferenceProcessor::sentinel_ref(); }
-
-  // Get oop to the Reference object.
-  inline oop obj() const { return _ref; }
-
-  // Get oop to the referent object.
-  inline oop referent() const { return _referent; }
-
-  // Returns true if referent is alive.
-  inline bool is_referent_alive() const;
-
-  // Loads data for the current reference.
-  // The "allow_null_referent" argument tells us to allow for the possibility
-  // of a NULL referent in the discovered Reference object. This typically
-  // happens in the case of concurrent collectors that may have done the
-  // discovery concurrently, or interleaved, with mutator execution.
-  inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent));
-
-  // Move to the next discovered reference.
-  inline void next();
-
-  // Remove the current reference from the list
-  inline void remove();
-
-  // Make the Reference object active again.
-  inline void make_active() { java_lang_ref_Reference::set_next(_ref, NULL); }
-
-  // Make the referent alive.
-  inline void make_referent_alive() {
-    if (UseCompressedOops) {
-      _keep_alive->do_oop((narrowOop*)_referent_addr);
-    } else {
-      _keep_alive->do_oop((oop*)_referent_addr);
-    }
-  }
-
-  // Update the discovered field.
-  inline void update_discovered() {
-    // First _prev_next ref actually points into DiscoveredList (gross).
-    if (UseCompressedOops) {
-      _keep_alive->do_oop((narrowOop*)_prev_next);
-    } else {
-      _keep_alive->do_oop((oop*)_prev_next);
-    }
-  }
-
-  // NULL out referent pointer.
-  inline void clear_referent() { oop_store_raw(_referent_addr, NULL); }
-
-  // Statistics
-  NOT_PRODUCT(
-  inline size_t processed() const { return _processed; }
-  inline size_t removed() const   { return _removed; }
-  )
-
-  inline void move_to_next();
-
-private:
-  DiscoveredList&    _refs_list;
-  HeapWord*          _prev_next;
-  oop                _ref;
-  HeapWord*          _discovered_addr;
-  oop                _next;
-  HeapWord*          _referent_addr;
-  oop                _referent;
-  OopClosure*        _keep_alive;
-  BoolObjectClosure* _is_alive;
-  DEBUG_ONLY(
-  oop                _first_seen; // cyclic linked list check
-  )
-  NOT_PRODUCT(
-  size_t             _processed;
-  size_t             _removed;
-  )
-};
-
-inline DiscoveredListIterator::DiscoveredListIterator(DiscoveredList&    refs_list,
-                                                      OopClosure*        keep_alive,
-                                                      BoolObjectClosure* is_alive)
-  : _refs_list(refs_list),
-    _prev_next(refs_list.adr_head()),
-    _ref(refs_list.head()),
-#ifdef ASSERT
-    _first_seen(refs_list.head()),
-#endif
-#ifndef PRODUCT
-    _processed(0),
-    _removed(0),
-#endif
-    _next(refs_list.head()),
-    _keep_alive(keep_alive),
-    _is_alive(is_alive)
-{ }
-
-inline bool DiscoveredListIterator::is_referent_alive() const {
-  return _is_alive->do_object_b(_referent);
-}
-
-inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
+void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
   _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
   oop discovered = java_lang_ref_Reference::discovered(_ref);
   assert(_discovered_addr && discovered->is_oop_or_null(),
@@ -542,30 +411,55 @@
          "bad referent");
 }
 
-inline void DiscoveredListIterator::next() {
-  _prev_next = _discovered_addr;
-  move_to_next();
-}
-
-inline void DiscoveredListIterator::remove() {
+void DiscoveredListIterator::remove() {
   assert(_ref->is_oop(), "Dropping a bad reference");
   oop_store_raw(_discovered_addr, NULL);
+
   // First _prev_next ref actually points into DiscoveredList (gross).
+  oop new_next;
+  if (_next == _ref) {
+    // At the end of the list, we should make _prev point to itself.
+    // If _ref is the first ref, then _prev_next will be in the DiscoveredList,
+    // and _prev will be NULL.
+    new_next = _prev;
+  } else {
+    new_next = _next;
+  }
+
   if (UseCompressedOops) {
     // Remove Reference object from list.
-    oopDesc::encode_store_heap_oop_not_null((narrowOop*)_prev_next, _next);
+    oopDesc::encode_store_heap_oop((narrowOop*)_prev_next, new_next);
   } else {
     // Remove Reference object from list.
-    oopDesc::store_heap_oop((oop*)_prev_next, _next);
+    oopDesc::store_heap_oop((oop*)_prev_next, new_next);
   }
   NOT_PRODUCT(_removed++);
   _refs_list.dec_length(1);
 }
 
-inline void DiscoveredListIterator::move_to_next() {
-  _ref = _next;
-  assert(_ref != _first_seen, "cyclic ref_list found");
-  NOT_PRODUCT(_processed++);
+// Make the Reference object active again.
+void DiscoveredListIterator::make_active() {
+  // For G1 we don't want to use set_next - it
+  // will dirty the card for the next field of
+  // the reference object and will fail
+  // CT verification.
+  if (UseG1GC) {
+    BarrierSet* bs = oopDesc::bs();
+    HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref);
+
+    if (UseCompressedOops) {
+      bs->write_ref_field_pre((narrowOop*)next_addr, NULL);
+    } else {
+      bs->write_ref_field_pre((oop*)next_addr, NULL);
+    }
+    java_lang_ref_Reference::set_next_raw(_ref, NULL);
+  } else {
+    java_lang_ref_Reference::set_next(_ref, NULL);
+  }
+}
+
+void DiscoveredListIterator::clear_referent() {
+  oop_store_raw(_referent_addr, NULL);
 }
 
 // NOTE: process_phase*() are largely similar, and at a high level
@@ -613,7 +507,7 @@
   NOT_PRODUCT(
     if (PrintGCDetails && TraceReferenceGC) {
       gclog_or_tty->print_cr(" Dropped %d dead Refs out of %d "
-        "discovered Refs by policy  list " INTPTR_FORMAT,
+        "discovered Refs by policy, from list " INTPTR_FORMAT,
         iter.removed(), iter.processed(), (address)refs_list.head());
     }
   )
@@ -725,30 +619,35 @@
     assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
     iter.next();
   }
-  // Remember to keep sentinel pointer around
+  // Remember to update the next pointer of the last ref.
   iter.update_discovered();
   // Close the reachable set
   complete_gc->do_void();
 }
 
 void
-ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
-  oop obj = refs_list.head();
-  while (obj != sentinel_ref()) {
-    oop discovered = java_lang_ref_Reference::discovered(obj);
+ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) {
+  oop obj = NULL;
+  oop next = refs_list.head();
+  while (next != obj) {
+    obj = next;
+    next = java_lang_ref_Reference::discovered(obj);
     java_lang_ref_Reference::set_discovered_raw(obj, NULL);
-    obj = discovered;
   }
-  refs_list.set_head(sentinel_ref());
+  refs_list.set_head(NULL);
   refs_list.set_length(0);
 }
 
+void
+ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
+  clear_discovered_references(refs_list);
+}
+
 void ReferenceProcessor::abandon_partial_discovery() {
   // loop over the lists
-  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
+  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
     if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
-      gclog_or_tty->print_cr("\nAbandoning %s discovered list",
-                             list_name(i));
+      gclog_or_tty->print_cr("\nAbandoning %s discovered list", list_name(i));
     }
     abandon_partial_discovered_list(_discoveredSoftRefs[i]);
   }
@@ -817,6 +716,14 @@
   bool _clear_referent;
 };
 
+void ReferenceProcessor::set_discovered(oop ref, oop value) {
+  if (_discovered_list_needs_barrier) {
+    java_lang_ref_Reference::set_discovered(ref, value);
+  } else {
+    java_lang_ref_Reference::set_discovered_raw(ref, value);
+  }
+}
+
 // Balances reference queues.
 // Move entries from all queues[0, 1, ..., _max_num_q-1] to
 // queues[0, 1, ..., _num_q-1] because only the first _num_q
@@ -859,6 +766,9 @@
           refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
                               avg_refs - ref_lists[to_idx].length());
         }
+
+        assert(refs_to_move > 0, "otherwise the code below will fail");
+
         oop move_head = ref_lists[from_idx].head();
         oop move_tail = move_head;
         oop new_head  = move_head;
@@ -867,10 +777,24 @@
           move_tail = new_head;
           new_head = java_lang_ref_Reference::discovered(new_head);
         }
-        java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
+
+        // Add the chain to the to list.
+        if (ref_lists[to_idx].head() == NULL) {
+          // to list is empty. Make a loop at the end.
+          set_discovered(move_tail, move_tail);
+        } else {
+          set_discovered(move_tail, ref_lists[to_idx].head());
+        }
         ref_lists[to_idx].set_head(move_head);
         ref_lists[to_idx].inc_length(refs_to_move);
-        ref_lists[from_idx].set_head(new_head);
+
+        // Remove the chain from the from list.
+        if (move_tail == new_head) {
+          // We found the end of the from list.
+          ref_lists[from_idx].set_head(NULL);
+        } else {
+          ref_lists[from_idx].set_head(new_head);
+        }
         ref_lists[from_idx].dec_length(refs_to_move);
         if (ref_lists[from_idx].length() == 0) {
           break;
@@ -980,11 +904,7 @@
 
 void ReferenceProcessor::clean_up_discovered_references() {
   // loop over the lists
-  // Should this instead be
-  // for (int i = 0; i < subclasses_of_ref; i++_ {
-  //   for (int j = 0; j < _num_q; j++) {
-  //     int index = i * _max_num_q + j;
-  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
+  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
     if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
       gclog_or_tty->print_cr(
         "\nScrubbing %s discovered list of Null referents",
@@ -1082,42 +1002,40 @@
   // First we must make sure this object is only enqueued once. CAS in a non null
   // discovered_addr.
   oop current_head = refs_list.head();
+  // The last ref must have its discovered field pointing to itself.
+  oop next_discovered = (current_head != NULL) ? current_head : obj;
 
   // Note: In the case of G1, this specific pre-barrier is strictly
   // not necessary because the only case we are interested in
   // here is when *discovered_addr is NULL (see the CAS further below),
   // so this will expand to nothing. As a result, we have manually
   // elided this out for G1, but left in the test for some future
-  // collector that might have need for a pre-barrier here.
-  if (_discovered_list_needs_barrier && !UseG1GC) {
-    if (UseCompressedOops) {
-      _bs->write_ref_field_pre((narrowOop*)discovered_addr, current_head);
-    } else {
-      _bs->write_ref_field_pre((oop*)discovered_addr, current_head);
-    }
-    guarantee(false, "Need to check non-G1 collector");
-  }
-  oop retest = oopDesc::atomic_compare_exchange_oop(current_head, discovered_addr,
+  // collector that might have need for a pre-barrier here, e.g.:-
+  // _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
+  assert(!_discovered_list_needs_barrier || UseG1GC,
+         "Need to check non-G1 collector: "
+         "may need a pre-write-barrier for CAS from NULL below");
+  oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr,
                                                     NULL);
   if (retest == NULL) {
     // This thread just won the right to enqueue the object.
-    // We have separate lists for enqueueing so no synchronization
+    // We have separate lists for enqueueing, so no synchronization
     // is necessary.
     refs_list.set_head(obj);
     refs_list.inc_length(1);
     if (_discovered_list_needs_barrier) {
-      _bs->write_ref_field((void*)discovered_addr, current_head);
+      _bs->write_ref_field((void*)discovered_addr, next_discovered);
     }
 
     if (TraceReferenceGC) {
-      gclog_or_tty->print_cr("Enqueued reference (mt) (" INTPTR_FORMAT ": %s)",
+      gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
                              obj, obj->blueprint()->internal_name());
     }
   } else {
     // If retest was non NULL, another thread beat us to it:
     // The reference has already been discovered...
     if (TraceReferenceGC) {
-      gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
+      gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
                              obj, obj->blueprint()->internal_name());
     }
   }
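// The multi-threaded path above (and the single-threaded path further below)
// now derive the new link identically before publishing the Reference at the
// list head.  A condensed sketch of that push, omitting the CAS and the _bs
// write barriers (refs_list and obj are hypothetical here):

oop head            = refs_list.head();
oop next_discovered = (head != NULL) ? head : obj;  // self-loop terminates the list
java_lang_ref_Reference::set_discovered_raw(obj, next_discovered);
refs_list.set_head(obj);
refs_list.inc_length(1);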
@@ -1142,7 +1060,7 @@
 //     (or part of the heap being collected, indicated by our "span"
 //     we don't treat it specially (i.e. we scan it as we would
 //     a normal oop, treating its references as strong references).
-//     This means that references can't be enqueued unless their
+//     This means that references can't be discovered unless their
 //     referent is also in the same span. This is the simplest,
 //     most "local" and most conservative approach, albeit one
 //     that may cause weak references to be enqueued least promptly.
@@ -1164,14 +1082,13 @@
 //     and complexity in processing these references.
//     We call this choice the "ReferentBasedDiscovery" policy.
 bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
-  // We enqueue references only if we are discovering refs
-  // (rather than processing discovered refs).
+  // Make sure we are discovering refs (rather than processing discovered refs).
   if (!_discovering_refs || !RegisterReferences) {
     return false;
   }
-  // We only enqueue active references.
+  // We only discover active references.
   oop next = java_lang_ref_Reference::next(obj);
-  if (next != NULL) {
+  if (next != NULL) {   // Ref is no longer active
     return false;
   }
 
@@ -1184,8 +1101,8 @@
     return false;
   }
 
-  // We only enqueue references whose referents are not (yet) strongly
-  // reachable.
+  // We only discover references whose referents are not (yet)
+  // known to be strongly reachable.
   if (is_alive_non_header() != NULL) {
     verify_referent(obj);
     if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
@@ -1205,13 +1122,15 @@
     }
   }
 
+  ResourceMark rm;      // Needed for tracing.
+
   HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
   const oop  discovered = java_lang_ref_Reference::discovered(obj);
   assert(discovered->is_oop_or_null(), "bad discovered field");
   if (discovered != NULL) {
     // The reference has already been discovered...
     if (TraceReferenceGC) {
-      gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
+      gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
                              obj, obj->blueprint()->internal_name());
     }
     if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
@@ -1233,9 +1152,9 @@
 
   if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
     verify_referent(obj);
-    // enqueue if and only if either:
-    // reference is in our span or
-    // we are an atomic collector and referent is in our span
+    // Discover if and only if EITHER:
+    // .. reference is in our span, OR
+    // .. we are an atomic collector and referent is in our span
     if (_span.contains(obj_addr) ||
         (discovery_is_atomic() &&
          _span.contains(java_lang_ref_Reference::referent(obj)))) {
@@ -1262,30 +1181,28 @@
     // here: the field will be visited later when processing the discovered
     // references.
     oop current_head = list->head();
+    // The last ref must have its discovered field pointing to itself.
+    oop next_discovered = (current_head != NULL) ? current_head : obj;
+
     // As in the case further above, since we are over-writing a NULL
     // pre-value, we can safely elide the pre-barrier here for the case of G1.
+    // e.g.:- _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
     assert(discovered == NULL, "control point invariant");
-    if (_discovered_list_needs_barrier && !UseG1GC) { // safe to elide for G1
-      if (UseCompressedOops) {
-        _bs->write_ref_field_pre((narrowOop*)discovered_addr, current_head);
-      } else {
-        _bs->write_ref_field_pre((oop*)discovered_addr, current_head);
-      }
-      guarantee(false, "Need to check non-G1 collector");
-    }
-    oop_store_raw(discovered_addr, current_head);
+    assert(!_discovered_list_needs_barrier || UseG1GC,
+           "For non-G1 collector, may need a pre-write-barrier for CAS from NULL below");
+    oop_store_raw(discovered_addr, next_discovered);
     if (_discovered_list_needs_barrier) {
-      _bs->write_ref_field((void*)discovered_addr, current_head);
+      _bs->write_ref_field((void*)discovered_addr, next_discovered);
     }
     list->set_head(obj);
     list->inc_length(1);
 
     if (TraceReferenceGC) {
-      gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)",
+      gclog_or_tty->print_cr("Discovered reference (" INTPTR_FORMAT ": %s)",
                                 obj, obj->blueprint()->internal_name());
     }
   }
-  assert(obj->is_oop(), "Enqueued a bad reference");
+  assert(obj->is_oop(), "Discovered a bad reference");
   verify_referent(obj);
   return true;
 }
@@ -1419,7 +1336,9 @@
 }
 
 const char* ReferenceProcessor::list_name(int i) {
-   assert(i >= 0 && i <= _max_num_q * subclasses_of_ref, "Out of bounds index");
+   assert(i >= 0 && i <= _max_num_q * number_of_subclasses_of_ref(),
+          "Out of bounds index");
+
    int j = i / _max_num_q;
    switch (j) {
      case 0: return "SoftRef";
@@ -1437,22 +1356,12 @@
 }
 #endif
 
-void ReferenceProcessor::verify() {
-  guarantee(sentinel_ref() != NULL && sentinel_ref()->is_oop(), "Lost _sentinelRef");
-}
-
 #ifndef PRODUCT
 void ReferenceProcessor::clear_discovered_references() {
   guarantee(!_discovering_refs, "Discovering refs?");
-  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
-    oop obj = _discoveredSoftRefs[i].head();
-    while (obj != sentinel_ref()) {
-      oop next = java_lang_ref_Reference::discovered(obj);
-      java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
-      obj = next;
-    }
-    _discoveredSoftRefs[i].set_head(sentinel_ref());
-    _discoveredSoftRefs[i].set_length(0);
+  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
+    clear_discovered_references(_discoveredSoftRefs[i]);
   }
 }
+
 #endif // PRODUCT
--- a/src/share/vm/memory/referenceProcessor.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/memory/referenceProcessor.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -48,18 +48,175 @@
 // forward references
 class ReferencePolicy;
 class AbstractRefProcTaskExecutor;
-class DiscoveredList;
+
+// List of discovered references.
+class DiscoveredList {
+public:
+  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
+  oop head() const     {
+     return UseCompressedOops ?  oopDesc::decode_heap_oop(_compressed_head) :
+                                _oop_head;
+  }
+  HeapWord* adr_head() {
+    return UseCompressedOops ? (HeapWord*)&_compressed_head :
+                               (HeapWord*)&_oop_head;
+  }
+  void set_head(oop o) {
+    if (UseCompressedOops) {
+      // Must compress the head ptr.
+      _compressed_head = oopDesc::encode_heap_oop(o);
+    } else {
+      _oop_head = o;
+    }
+  }
+  bool   is_empty() const       { return head() == NULL; }
+  size_t length()               { return _len; }
+  void   set_length(size_t len) { _len = len;  }
+  void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
+  void   dec_length(size_t dec) { _len -= dec; }
+private:
+  // Set value depending on UseCompressedOops. This could be a template class
+  // but then we have to fix all the instantiations and declarations that use this class.
+  oop       _oop_head;
+  narrowOop _compressed_head;
+  size_t _len;
+};
+
+// Iterator for the list of discovered references.
+class DiscoveredListIterator {
+private:
+  DiscoveredList&    _refs_list;
+  HeapWord*          _prev_next;
+  oop                _prev;
+  oop                _ref;
+  HeapWord*          _discovered_addr;
+  oop                _next;
+  HeapWord*          _referent_addr;
+  oop                _referent;
+  OopClosure*        _keep_alive;
+  BoolObjectClosure* _is_alive;
+
+  DEBUG_ONLY(
+  oop                _first_seen; // cyclic linked list check
+  )
+
+  NOT_PRODUCT(
+  size_t             _processed;
+  size_t             _removed;
+  )
+
+public:
+  inline DiscoveredListIterator(DiscoveredList&    refs_list,
+                                OopClosure*        keep_alive,
+                                BoolObjectClosure* is_alive):
+    _refs_list(refs_list),
+    _prev_next(refs_list.adr_head()),
+    _prev(NULL),
+    _ref(refs_list.head()),
+#ifdef ASSERT
+    _first_seen(refs_list.head()),
+#endif
+#ifndef PRODUCT
+    _processed(0),
+    _removed(0),
+#endif
+    _next(NULL),
+    _keep_alive(keep_alive),
+    _is_alive(is_alive)
+{ }
+
+  // End Of List.
+  inline bool has_next() const { return _ref != NULL; }
+
+  // Get oop to the Reference object.
+  inline oop obj() const { return _ref; }
+
+  // Get oop to the referent object.
+  inline oop referent() const { return _referent; }
+
+  // Returns true if referent is alive.
+  inline bool is_referent_alive() const {
+    return _is_alive->do_object_b(_referent);
+  }
+
+  // Loads data for the current reference.
+  // The "allow_null_referent" argument tells us to allow for the possibility
+  // of a NULL referent in the discovered Reference object. This typically
+  // happens in the case of concurrent collectors that may have done the
+  // discovery concurrently, or interleaved, with mutator execution.
+  void load_ptrs(DEBUG_ONLY(bool allow_null_referent));
+
+  // Move to the next discovered reference.
+  inline void next() {
+    _prev_next = _discovered_addr;
+    _prev = _ref;
+    move_to_next();
+  }
+
+  // Remove the current reference from the list
+  void remove();
+
+  // Make the Reference object active again.
+  void make_active();
+
+  // Make the referent alive.
+  inline void make_referent_alive() {
+    if (UseCompressedOops) {
+      _keep_alive->do_oop((narrowOop*)_referent_addr);
+    } else {
+      _keep_alive->do_oop((oop*)_referent_addr);
+    }
+  }
+
+  // Update the discovered field.
+  inline void update_discovered() {
+    // First _prev_next ref actually points into DiscoveredList (gross).
+    if (UseCompressedOops) {
+      if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
+        _keep_alive->do_oop((narrowOop*)_prev_next);
+      }
+    } else {
+      if (!oopDesc::is_null(*(oop*)_prev_next)) {
+        _keep_alive->do_oop((oop*)_prev_next);
+      }
+    }
+  }
+
+  // NULL out referent pointer.
+  void clear_referent();
+
+  // Statistics
+  NOT_PRODUCT(
+  inline size_t processed() const { return _processed; }
+  inline size_t removed() const   { return _removed; }
+  )
+
+  inline void move_to_next() {
+    if (_ref == _next) {
+      // End of the list.
+      _ref = NULL;
+    } else {
+      _ref = _next;
+    }
+    assert(_ref != _first_seen, "cyclic ref_list found");
+    NOT_PRODUCT(_processed++);
+  }
+
+};
 
 class ReferenceProcessor : public CHeapObj {
  protected:
-  // End of list marker
-  static oop  _sentinelRef;
-  MemRegion   _span; // (right-open) interval of heap
-                     // subject to wkref discovery
-  bool        _discovering_refs;      // true when discovery enabled
-  bool        _discovery_is_atomic;   // if discovery is atomic wrt
-                                      // other collectors in configuration
-  bool        _discovery_is_mt;       // true if reference discovery is MT.
+  // Compatibility with pre-4965777 JDKs
+  static bool _pending_list_uses_discovered_field;
+
+  MemRegion   _span;                    // (right-open) interval of heap
+                                        // subject to wkref discovery
+
+  bool        _discovering_refs;        // true when discovery enabled
+  bool        _discovery_is_atomic;     // if discovery is atomic wrt
+                                        // other collectors in configuration
+  bool        _discovery_is_mt;         // true if reference discovery is MT.
+
   // If true, setting "next" field of a discovered refs list requires
   // write barrier(s).  (Must be true if used in a collector in which
   // elements of a discovered list may be moved during discovery: for
@@ -67,18 +224,19 @@
   // long-term concurrent marking phase that does weak reference
   // discovery.)
   bool        _discovered_list_needs_barrier;
-  BarrierSet* _bs;                    // Cached copy of BarrierSet.
-  bool        _enqueuing_is_done;     // true if all weak references enqueued
-  bool        _processing_is_mt;      // true during phases when
-                                      // reference processing is MT.
-  int         _next_id;               // round-robin mod _num_q counter in
-                                      // support of work distribution
 
-  // For collectors that do not keep GC marking information
+  BarrierSet* _bs;                      // Cached copy of BarrierSet.
+  bool        _enqueuing_is_done;       // true if all weak references enqueued
+  bool        _processing_is_mt;        // true during phases when
+                                        // reference processing is MT.
+  int         _next_id;                 // round-robin mod _num_q counter in
+                                        // support of work distribution
+
+  // For collectors that do not keep GC liveness information
   // in the object header, this field holds a closure that
   // helps the reference processor determine the reachability
-  // of an oop (the field is currently initialized to NULL for
-  // all collectors but the CMS collector).
+  // of an oop. It is currently initialized to NULL for all
+  // collectors except for CMS and G1.
   BoolObjectClosure* _is_alive_non_header;
 
   // Soft ref clearing policies
@@ -102,12 +260,13 @@
   DiscoveredList* _discoveredPhantomRefs;
 
  public:
-  int num_q()                            { return _num_q; }
-  int max_num_q()                        { return _max_num_q; }
-  void set_active_mt_degree(int v)       { _num_q = v; }
-  DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
-  static oop  sentinel_ref()             { return _sentinelRef; }
-  static oop* adr_sentinel_ref()         { return &_sentinelRef; }
+  static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }
+
+  int num_q()                              { return _num_q; }
+  int max_num_q()                          { return _max_num_q; }
+  void set_active_mt_degree(int v)         { _num_q = v; }
+  DiscoveredList* discovered_soft_refs()   { return _discoveredSoftRefs; }
+
   ReferencePolicy* setup_policy(bool always_clear) {
     _current_soft_ref_policy = always_clear ?
       _always_clear_soft_ref_policy : _default_soft_ref_policy;
@@ -115,7 +274,6 @@
     return _current_soft_ref_policy;
   }
 
- public:
   // Process references with a certain reachability level.
   void process_discovered_reflist(DiscoveredList               refs_lists[],
                                   ReferencePolicy*             policy,
@@ -208,6 +366,11 @@
   void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
 
  protected:
+  // Set the 'discovered' field of the given reference to
+  // the given value - emitting barriers depending upon
+  // the value of _discovered_list_needs_barrier.
+  void set_discovered(oop ref, oop value);
+
   // "Preclean" the given discovered reference list
   // by removing references with strongly reachable referents.
   // Currently used in support of CMS only.
@@ -230,6 +393,7 @@
                                         HeapWord* discovered_addr);
   void verify_ok_to_handle_reflists() PRODUCT_RETURN;
 
+  void clear_discovered_references(DiscoveredList& refs_list);
   void abandon_partial_discovered_list(DiscoveredList& refs_list);
 
   // Calculate the number of jni handles.
@@ -292,7 +456,19 @@
   void      set_span(MemRegion span) { _span = span; }
 
   // start and stop weak ref discovery
-  void enable_discovery()   { _discovering_refs = true;  }
+  void enable_discovery(bool verify_disabled, bool check_no_refs) {
+#ifdef ASSERT
+    // Verify that we're not currently discovering refs
+    assert(!verify_disabled || !_discovering_refs, "nested call?");
+
+    if (check_no_refs) {
+      // Verify that the discovered lists are empty
+      verify_no_references_recorded();
+    }
+#endif // ASSERT
+    _discovering_refs = true;
+  }
+
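// Callers now state their expectations explicitly when re-enabling discovery,
// e.g. a typical collection prologue (hypothetical call site):
//
//   ref_processor()->enable_discovery(true /* verify_disabled */,
//                                     true /* check_no_refs */);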
   void disable_discovery()  { _discovering_refs = false; }
   bool discovery_enabled()  { return _discovering_refs;  }
 
@@ -300,6 +476,13 @@
   bool discovery_is_atomic() const { return _discovery_is_atomic; }
   void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }
 
+  // whether the JDK in which we are embedded is a pre-4965777 JDK,
+  // and thus whether or not it uses the discovered field to chain
+  // the entries in the pending list.
+  static bool pending_list_uses_discovered_field() {
+    return _pending_list_uses_discovered_field;
+  }
+
   // whether discovery is done by multiple threads same-old-timeously
   bool discovery_is_mt() const { return _discovery_is_mt; }
   void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }
@@ -314,7 +497,6 @@
 
   // iterate over oops
   void weak_oops_do(OopClosure* f);       // weak roots
-  static void oops_do(OopClosure* f);     // strong root(s)
 
   // Balance each of the discovered lists.
   void balance_all_queues();
@@ -340,7 +522,6 @@
   // debugging
   void verify_no_references_recorded() PRODUCT_RETURN;
   void verify_referent(oop obj)        PRODUCT_RETURN;
-  static void verify();
 
   // clear the discovered lists (unlinking each entry).
   void clear_discovered_references() PRODUCT_RETURN;
@@ -362,7 +543,7 @@
 
   ~NoRefDiscovery() {
     if (_was_discovering_refs) {
-      _rp->enable_discovery();
+      _rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
     }
   }
 };
@@ -524,12 +705,10 @@
   EnqueueTask(ReferenceProcessor& ref_processor,
               DiscoveredList      refs_lists[],
               HeapWord*           pending_list_addr,
-              oop                 sentinel_ref,
               int                 n_queues)
     : _ref_processor(ref_processor),
       _refs_lists(refs_lists),
       _pending_list_addr(pending_list_addr),
-      _sentinel_ref(sentinel_ref),
       _n_queues(n_queues)
   { }
 
@@ -540,7 +719,6 @@
   ReferenceProcessor& _ref_processor;
   DiscoveredList*     _refs_lists;
   HeapWord*           _pending_list_addr;
-  oop                 _sentinel_ref;
   int                 _n_queues;
 };
 
--- a/src/share/vm/memory/resourceArea.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/memory/resourceArea.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,6 +52,7 @@
 class ResourceArea: public Arena {
   friend class ResourceMark;
   friend class DeoptResourceMark;
+  friend class VMStructs;
   debug_only(int _nesting;)             // current # of nested ResourceMarks
   debug_only(static int _warned;)       // to suppress multiple warnings
 
--- a/src/share/vm/memory/sharedHeap.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/memory/sharedHeap.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -146,7 +146,6 @@
   assert(_strong_roots_parity != 0, "must have called prologue code");
   if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
     Universe::oops_do(roots);
-    ReferenceProcessor::oops_do(roots);
     // Consider perm-gen discovered lists to be strong.
     perm_gen()->ref_processor()->weak_oops_do(roots);
   }
--- a/src/share/vm/memory/threadLocalAllocBuffer.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/memory/threadLocalAllocBuffer.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -124,16 +124,7 @@
   // Reserve space at the end of TLAB
   static size_t end_reserve() {
     int reserve_size = typeArrayOopDesc::header_size(T_INT);
-    if (AllocatePrefetchStyle == 3) {
-      // BIS is used to prefetch - we need a space for it.
-      // +1 for rounding up to next cache line +1 to be safe
-      int lines = AllocatePrefetchLines + 2;
-      int step_size = AllocatePrefetchStepSize;
-      int distance = AllocatePrefetchDistance;
-      int prefetch_end = (distance + step_size*lines)/(int)HeapWordSize;
-      reserve_size = MAX2(reserve_size, prefetch_end);
-    }
-    return reserve_size;
+    return MAX2(reserve_size, VM_Version::reserve_for_allocation_prefetch());
   }
   static size_t alignment_reserve()              { return align_object_size(end_reserve()); }
   static size_t alignment_reserve_in_bytes()     { return alignment_reserve() * HeapWordSize; }
--- a/src/share/vm/memory/universe.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/memory/universe.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1177,7 +1177,7 @@
  // stopped during the safepoint so CodeCache will be safe to update without
   // holding the CodeCache_lock.
 
-  DepChange changes(dependee);
+  KlassDepChange changes(dependee);
 
   // Compute the dependent nmethods
   if (CodeCache::mark_for_deoptimization(changes) > 0) {
@@ -1187,6 +1187,37 @@
   }
 }
 
+// Flushes compiled methods dependent on a particular CallSite
+// instance when its target is different than the given MethodHandle.
+void Universe::flush_dependents_on(Handle call_site, Handle method_handle) {
+  assert_lock_strong(Compile_lock);
+
+  if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
+
+  // CodeCache can only be updated by a thread_in_VM and they will all be
+  // stopped during the safepoint so CodeCache will be safe to update without
+  // holding the CodeCache_lock.
+
+  CallSiteDepChange changes(call_site(), method_handle());
+
+  // Compute the dependent nmethods that have a reference to a
+  // CallSite object.  We use instanceKlass::mark_dependent_nmethods
+  // directly instead of CodeCache::mark_for_deoptimization because we
+  // want dependents on the call site class only, not all classes in
+  // the ContextStream.
+  int marked = 0;
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    instanceKlass* call_site_klass = instanceKlass::cast(call_site->klass());
+    marked = call_site_klass->mark_dependent_nmethods(changes);
+  }
+  if (marked > 0) {
+    // At least one nmethod has been marked for deoptimization
+    VM_Deoptimize op;
+    VMThread::execute(&op);
+  }
+}
+
 #ifdef HOTSWAP
 // Flushes compiled methods dependent on dependee in the evolutionary sense
 void Universe::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
@@ -1278,7 +1309,7 @@
   st->print_cr("}");
 }
 
-void Universe::verify(bool allow_dirty, bool silent, bool option) {
+void Universe::verify(bool allow_dirty, bool silent, VerifyOption option) {
   if (SharedSkipVerify) {
     return;
   }
--- a/src/share/vm/memory/universe.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/memory/universe.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -109,6 +109,14 @@
   bool    _use_implicit_null_checks;
 };
 
+enum VerifyOption {
+      VerifyOption_Default = 0,
+
+      // G1
+      VerifyOption_G1UsePrevMarking = VerifyOption_Default,
+      VerifyOption_G1UseNextMarking = VerifyOption_G1UsePrevMarking + 1,
+      VerifyOption_G1UseMarkWord    = VerifyOption_G1UseNextMarking + 1
+};
 
 class Universe: AllStatic {
   // Ugh.  Universe is much too friendly.
@@ -404,7 +412,8 @@
 
   // Debugging
   static bool verify_in_progress() { return _verify_in_progress; }
-  static void verify(bool allow_dirty = true, bool silent = false, bool option = true);
+  static void verify(bool allow_dirty = true, bool silent = false,
+                     VerifyOption option = VerifyOption_Default );
   static int  verify_count()                  { return _verify_count; }
   static void print();
   static void print_on(outputStream* st);
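// The default argument keeps existing Universe::verify() callers unchanged;
// a G1 caller can select a marking view explicitly, e.g. (hypothetical):
//
//   Universe::verify(true /* allow_dirty */, false /* silent */,
//                    VerifyOption_G1UsePrevMarking);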
@@ -430,6 +439,7 @@
 
   // Flushing and deoptimization
   static void flush_dependents_on(instanceKlassHandle dependee);
+  static void flush_dependents_on(Handle call_site, Handle method_handle);
 #ifdef HOTSWAP
   // Flushing and deoptimization in case of evolution
   static void flush_evol_dependents_on(instanceKlassHandle dependee);
--- a/src/share/vm/oops/constMethodKlass.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/oops/constMethodKlass.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -172,11 +172,6 @@
 int constMethodKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
   assert(obj->is_constMethod(), "should be constMethod");
   constMethodOop cm_oop = constMethodOop(obj);
-#if 0
-  PSParallelCompact::adjust_pointer(cm_oop->adr_method());
-  PSParallelCompact::adjust_pointer(cm_oop->adr_exception_table());
-  PSParallelCompact::adjust_pointer(cm_oop->adr_stackmap_data());
-#endif
   oop* const beg_oop = cm_oop->oop_block_beg();
   oop* const end_oop = cm_oop->oop_block_end();
   for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
--- a/src/share/vm/oops/cpCacheKlass.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/oops/cpCacheKlass.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -63,8 +63,10 @@
   //   CollectedHeap::permanent_obj_allocate(klass, size, CHECK_NULL);
 
   oop obj = CollectedHeap::permanent_obj_allocate_no_klass_install(klass, size, CHECK_NULL);
-  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value((HeapWord*) obj,
-                                                              size));
+#ifndef PRODUCT
+  const size_t hs = oopDesc::header_size();
+  Universe::heap()->check_for_bad_heap_word_value(((HeapWord*) obj)+hs, size-hs);
+#endif
   constantPoolCacheOop cache = (constantPoolCacheOop) obj;
   assert(!UseConcMarkSweepGC || obj->klass_or_null() == NULL,
          "klass should be NULL here when using CMS");
--- a/src/share/vm/oops/cpCacheOop.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/oops/cpCacheOop.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -128,17 +128,13 @@
 void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code,
                                        Bytecodes::Code put_code,
                                        KlassHandle field_holder,
-                                       int orig_field_index,
+                                       int field_index,
                                        int field_offset,
                                        TosState field_type,
                                        bool is_final,
                                        bool is_volatile) {
   set_f1(field_holder()->java_mirror());
   set_f2(field_offset);
-  // The field index is used by jvm/ti and is the index into fields() array
-  // in holder instanceKlass.  This is scaled by instanceKlass::next_offset.
-  assert((orig_field_index % instanceKlass::next_offset) == 0, "wierd index");
-  const int field_index = orig_field_index / instanceKlass::next_offset;
   assert(field_index <= field_index_mask,
          "field index does not fit in low flag bits");
   set_flags(as_flags(field_type, is_final, false, is_volatile, false, false) |
@@ -149,7 +145,7 @@
 }
 
 int  ConstantPoolCacheEntry::field_index() const {
-  return (_flags & field_index_mask) * instanceKlass::next_offset;
+  return (_flags & field_index_mask);
 }
 
 void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/oops/fieldInfo.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_OOPS_FIELDINFO_HPP
+#define SHARE_VM_OOPS_FIELDINFO_HPP
+
+#include "oops/typeArrayOop.hpp"
+#include "classfile/vmSymbols.hpp"
+
+// This class represents the field information contained in the fields
+// array of an instanceKlass.  Currently it's laid on top an array of
+// Java shorts but in the future it could simply be used as a real
+// array type.  FieldInfo generally shouldn't be used directly.
+// Fields should be queried either through instanceKlass or through
+// the various FieldStreams.
+class FieldInfo VALUE_OBJ_CLASS_SPEC {
+  friend class fieldDescriptor;
+  friend class JavaFieldStream;
+  friend class ClassFileParser;
+
+ public:
+  // fields
+  // Field info extracted from the class file and stored
+  // as an array of 7 shorts
+  enum FieldOffset {
+    access_flags_offset      = 0,
+    name_index_offset        = 1,
+    signature_index_offset   = 2,
+    initval_index_offset     = 3,
+    low_offset               = 4,
+    high_offset              = 5,
+    generic_signature_offset = 6,
+    field_slots              = 7
+  };
+
+ private:
+  u2 _shorts[field_slots];
+
+  void set_name_index(u2 val)                    { _shorts[name_index_offset] = val;         }
+  void set_signature_index(u2 val)               { _shorts[signature_index_offset] = val;    }
+  void set_initval_index(u2 val)                 { _shorts[initval_index_offset] = val;      }
+  void set_generic_signature_index(u2 val)       { _shorts[generic_signature_offset] = val;  }
+
+  u2 name_index() const                          { return _shorts[name_index_offset];        }
+  u2 signature_index() const                     { return _shorts[signature_index_offset];   }
+  u2 initval_index() const                       { return _shorts[initval_index_offset];     }
+  u2 generic_signature_index() const             { return _shorts[generic_signature_offset]; }
+
+ public:
+  static FieldInfo* from_field_array(typeArrayOop fields, int index) {
+    return ((FieldInfo*)fields->short_at_addr(index * field_slots));
+  }
+
+  void initialize(u2 access_flags,
+                  u2 name_index,
+                  u2 signature_index,
+                  u2 initval_index,
+                  u2 generic_signature_index,
+                  u4 offset) {
+    _shorts[access_flags_offset] = access_flags;
+    _shorts[name_index_offset] = name_index;
+    _shorts[signature_index_offset] = signature_index;
+    _shorts[initval_index_offset] = initval_index;
+    _shorts[generic_signature_offset] = generic_signature_index;
+    set_offset(offset);
+  }
+
+  u2 access_flags() const                        { return _shorts[access_flags_offset];            }
+  u4 offset() const                              { return build_int_from_shorts(_shorts[low_offset], _shorts[high_offset]); }
+
+  Symbol* name(constantPoolHandle cp) const {
+    int index = name_index();
+    if (is_internal()) {
+      return lookup_symbol(index);
+    }
+    return cp->symbol_at(index);
+  }
+
+  Symbol* signature(constantPoolHandle cp) const {
+    int index = signature_index();
+    if (is_internal()) {
+      return lookup_symbol(index);
+    }
+    return cp->symbol_at(index);
+  }
+
+  Symbol* generic_signature(constantPoolHandle cp) const {
+    int index = generic_signature_index();
+    if (index == 0) {
+      return NULL;
+    }
+    return cp->symbol_at(index);
+  }
+
+  void set_access_flags(u2 val)                  { _shorts[access_flags_offset] = val;             }
+  void set_offset(u4 val)                        {
+    _shorts[low_offset] = extract_low_short_from_int(val);
+    _shorts[high_offset] = extract_high_short_from_int(val);
+  }
+
+  bool is_internal() const {
+    return (access_flags() & JVM_ACC_FIELD_INTERNAL) != 0;
+  }
+
+  Symbol* lookup_symbol(int symbol_index) const {
+    assert(is_internal(), "only internal fields");
+    return vmSymbols::symbol_at((vmSymbols::SID)symbol_index);
+  }
+};
+
+#endif // SHARE_VM_OOPS_FIELDINFO_HPP
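// FieldInfo records are packed back to back in the fields array, so record i
// starts at short index i * FieldInfo::field_slots.  A small sketch of reading
// one record (fields and i are hypothetical; only the accessors declared above
// are used):

FieldInfo* info = FieldInfo::from_field_array(fields, i);
u2   flags      = info->access_flags();
u4   byte_off   = info->offset();
bool injected   = info->is_internal();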
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/oops/fieldStreams.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_OOPS_FIELDSTREAMS_HPP
+#define SHARE_VM_OOPS_FIELDSTREAMS_HPP
+
+#include "oops/instanceKlass.hpp"
+#include "oops/fieldInfo.hpp"
+
+// This is the base class for iteration over the fields array
+// describing the declared fields in the class.  Several subclasses
+// are provided depending on the kind of iteration required.  The
+// JavaFieldStream is for iterating over regular Java fields and is
+// generally the preferred iterator.  InternalFieldStream only
+// iterates over fields that have been injected by the JVM.
+// AllFieldStream exposes all fields and should only be used in rare
+// cases.
+class FieldStreamBase : public StackObj {
+ protected:
+  typeArrayHandle     _fields;
+  constantPoolHandle  _constants;
+  int                 _index;
+  int                 _limit;
+
+  FieldInfo* field() const { return FieldInfo::from_field_array(_fields(), _index); }
+
+  FieldStreamBase(typeArrayHandle fields, constantPoolHandle constants, int start, int limit) {
+    _fields = fields;
+    _constants = constants;
+    _index = start;
+    _limit = limit;
+  }
+
+  FieldStreamBase(typeArrayHandle fields, constantPoolHandle constants) {
+    _fields = fields;
+    _constants = constants;
+    _index = 0;
+    _limit = fields->length() / FieldInfo::field_slots;
+  }
+
+ public:
+  FieldStreamBase(instanceKlass* klass) {
+    _fields = klass->fields();
+    _constants = klass->constants();
+    _index = 0;
+    _limit = klass->java_fields_count();
+  }
+  FieldStreamBase(instanceKlassHandle klass) {
+    _fields = klass->fields();
+    _constants = klass->constants();
+    _index = 0;
+    _limit = klass->java_fields_count();
+  }
+
+  // accessors
+  int index() const                 { return _index; }
+
+  void next() { _index += 1; }
+  bool done() const { return _index >= _limit; }
+
+  // Accessors for current field
+  AccessFlags access_flags() const {
+    AccessFlags flags;
+    flags.set_flags(field()->access_flags());
+    return flags;
+  }
+
+  void set_access_flags(u2 flags) const {
+    field()->set_access_flags(flags);
+  }
+
+  void set_access_flags(AccessFlags flags) const {
+    set_access_flags(flags.as_short());
+  }
+
+  Symbol* name() const {
+    return field()->name(_constants);
+  }
+
+  Symbol* signature() const {
+    return field()->signature(_constants);
+  }
+
+  Symbol* generic_signature() const {
+    return field()->generic_signature(_constants);
+  }
+
+  int offset() const {
+    return field()->offset();
+  }
+
+  void set_offset(int offset) {
+    field()->set_offset(offset);
+  }
+};
+
+// Iterate over only the declared Java fields
+class JavaFieldStream : public FieldStreamBase {
+ public:
+  JavaFieldStream(instanceKlass* k):      FieldStreamBase(k->fields(), k->constants(), 0, k->java_fields_count()) {}
+  JavaFieldStream(instanceKlassHandle k): FieldStreamBase(k->fields(), k->constants(), 0, k->java_fields_count()) {}
+
+  int name_index() const {
+    assert(!field()->is_internal(), "regular only");
+    return field()->name_index();
+  }
+  void set_name_index(int index) {
+    assert(!field()->is_internal(), "regular only");
+    field()->set_name_index(index);
+  }
+  int signature_index() const {
+    assert(!field()->is_internal(), "regular only");
+    return field()->signature_index();
+  }
+  void set_signature_index(int index) {
+    assert(!field()->is_internal(), "regular only");
+    field()->set_signature_index(index);
+  }
+  int generic_signature_index() const {
+    assert(!field()->is_internal(), "regular only");
+    return field()->generic_signature_index();
+  }
+  void set_generic_signature_index(int index) {
+    assert(!field()->is_internal(), "regular only");
+    field()->set_generic_signature_index(index);
+  }
+  int initval_index() const {
+    assert(!field()->is_internal(), "regular only");
+    return field()->initval_index();
+  }
+  void set_initval_index(int index) {
+    assert(!field()->is_internal(), "regular only");
+    return field()->set_initval_index(index);
+  }
+};
+
+
+// Iterate over only the internal fields
+class InternalFieldStream : public FieldStreamBase {
+ public:
+  InternalFieldStream(instanceKlass* k):      FieldStreamBase(k->fields(), k->constants(), k->java_fields_count(), k->all_fields_count()) {}
+  InternalFieldStream(instanceKlassHandle k): FieldStreamBase(k->fields(), k->constants(), k->java_fields_count(), k->all_fields_count()) {}
+};
+
+
+class AllFieldStream : public FieldStreamBase {
+ public:
+  AllFieldStream(typeArrayHandle fields, constantPoolHandle constants): FieldStreamBase(fields, constants) {}
+  AllFieldStream(instanceKlass* k):      FieldStreamBase(k->fields(), k->constants()) {}
+  AllFieldStream(instanceKlassHandle k): FieldStreamBase(k->fields(), k->constants()) {}
+};
+
+#endif // SHARE_VM_OOPS_FIELDSTREAMS_HPP
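// The streams replace manual indexing by next_offset; the usual consumer loop
// (k is a hypothetical instanceKlassHandle) mirrors the instanceKlass.cpp
// changes further below:

for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
  Symbol* name = fs.name();
  Symbol* sig  = fs.signature();
  int     off  = fs.offset();
  // ... use the declared Java field ...
}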
--- a/src/share/vm/oops/generateOopMap.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/oops/generateOopMap.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1567,9 +1567,7 @@
     case Bytecodes::_jsr:               do_jsr(itr->dest());         break;
     case Bytecodes::_jsr_w:             do_jsr(itr->dest_w());       break;
 
-    case Bytecodes::_getstatic:         do_field(true,  true,
-                                                 itr->get_index_u2_cpcache(),
-                                                 itr->bci()); break;
+    case Bytecodes::_getstatic:         do_field(true,  true,  itr->get_index_u2_cpcache(), itr->bci()); break;
     case Bytecodes::_putstatic:         do_field(false, true,  itr->get_index_u2_cpcache(), itr->bci()); break;
     case Bytecodes::_getfield:          do_field(true,  false, itr->get_index_u2_cpcache(), itr->bci()); break;
     case Bytecodes::_putfield:          do_field(false, false, itr->get_index_u2_cpcache(), itr->bci()); break;
--- a/src/share/vm/oops/instanceKlass.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/oops/instanceKlass.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -36,6 +36,7 @@
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/permGen.hpp"
+#include "oops/fieldStreams.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/instanceMirrorKlass.hpp"
 #include "oops/instanceOop.hpp"
@@ -782,14 +783,11 @@
 
 
 bool instanceKlass::find_local_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
-  const int n = fields()->length();
-  for (int i = 0; i < n; i += next_offset ) {
-    int name_index = fields()->ushort_at(i + name_index_offset);
-    int sig_index  = fields()->ushort_at(i + signature_index_offset);
-    Symbol* f_name = constants()->symbol_at(name_index);
-    Symbol* f_sig  = constants()->symbol_at(sig_index);
+  for (JavaFieldStream fs(as_klassOop()); !fs.done(); fs.next()) {
+    Symbol* f_name = fs.name();
+    Symbol* f_sig  = fs.signature();
     if (f_name == name && f_sig == sig) {
-      fd->initialize(as_klassOop(), i);
+      fd->initialize(as_klassOop(), fs.index());
       return true;
     }
   }
@@ -803,11 +801,10 @@
   closure->do_symbol(&_source_file_name);
   closure->do_symbol(&_source_debug_extension);
 
-  const int n = fields()->length();
-  for (int i = 0; i < n; i += next_offset ) {
-    int name_index = fields()->ushort_at(i + name_index_offset);
+  for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
+    int name_index = fs.name_index();
     closure->do_symbol(constants()->symbol_at_addr(name_index));
-    int sig_index  = fields()->ushort_at(i + signature_index_offset);
+    int sig_index  = fs.signature_index();
     closure->do_symbol(constants()->symbol_at_addr(sig_index));
   }
 }
@@ -872,10 +869,9 @@
 
 
 bool instanceKlass::find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
-  int length = fields()->length();
-  for (int i = 0; i < length; i += next_offset) {
-    if (offset_from_fields( i ) == offset) {
-      fd->initialize(as_klassOop(), i);
+  for (JavaFieldStream fs(as_klassOop()); !fs.done(); fs.next()) {
+    if (fs.offset() == offset) {
+      fd->initialize(as_klassOop(), fs.index());
       if (fd->is_static() == is_static) return true;
     }
   }
@@ -906,11 +902,12 @@
 
 
 void instanceKlass::do_local_static_fields(FieldClosure* cl) {
-  fieldDescriptor fd;
-  int length = fields()->length();
-  for (int i = 0; i < length; i += next_offset) {
-    fd.initialize(as_klassOop(), i);
-    if (fd.is_static()) cl->do_field(&fd);
+  for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
+    if (fs.access_flags().is_static()) {
+      fieldDescriptor fd;
+      fd.initialize(as_klassOop(), fs.index());
+      cl->do_field(&fd);
+    }
   }
 }
 
@@ -922,11 +919,12 @@
 
 
 void instanceKlass::do_local_static_fields_impl(instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
-  fieldDescriptor fd;
-  int length = this_oop->fields()->length();
-  for (int i = 0; i < length; i += next_offset) {
-    fd.initialize(this_oop(), i);
-    if (fd.is_static()) { f(&fd, CHECK); } // Do NOT remove {}! (CHECK macro expands into several statements)
+  for (JavaFieldStream fs(this_oop()); !fs.done(); fs.next()) {
+    if (fs.access_flags().is_static()) {
+      fieldDescriptor fd;
+      fd.initialize(this_oop(), fs.index());
+      f(&fd, CHECK);
+    }
   }
 }
 
@@ -941,11 +939,11 @@
     super->do_nonstatic_fields(cl);
   }
   fieldDescriptor fd;
-  int length = fields()->length();
+  int length = java_fields_count();
   // In DebugInfo nonstatic fields are sorted by offset.
   int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1));
   int j = 0;
-  for (int i = 0; i < length; i += next_offset) {
+  for (int i = 0; i < length; i += 1) {
     fd.initialize(as_klassOop(), i);
     if (!fd.is_static()) {
       fields_sorted[j + 0] = fd.offset();
@@ -1374,39 +1372,8 @@
 
 
 //
-// nmethodBucket is used to record dependent nmethods for
-// deoptimization.  nmethod dependencies are actually <klass, method>
-// pairs but we really only care about the klass part for purposes of
-// finding nmethods which might need to be deoptimized.  Instead of
-// recording the method, a count of how many times a particular nmethod
-// was recorded is kept.  This ensures that any recording errors are
-// noticed since an nmethod should be removed as many times are it's
-// added.
-//
-class nmethodBucket {
- private:
-  nmethod*       _nmethod;
-  int            _count;
-  nmethodBucket* _next;
-
- public:
-  nmethodBucket(nmethod* nmethod, nmethodBucket* next) {
-    _nmethod = nmethod;
-    _next = next;
-    _count = 1;
-  }
-  int count()                             { return _count; }
-  int increment()                         { _count += 1; return _count; }
-  int decrement()                         { _count -= 1; assert(_count >= 0, "don't underflow"); return _count; }
-  nmethodBucket* next()                   { return _next; }
-  void set_next(nmethodBucket* b)         { _next = b; }
-  nmethod* get_nmethod()                  { return _nmethod; }
-};
-
-
-//
 // Walk the list of dependent nmethods searching for nmethods which
-// are dependent on the klassOop that was passed in and mark them for
+// are dependent on the changes that were passed in and mark them for
 // deoptimization.  Returns the number of nmethods found.
 //
 int instanceKlass::mark_dependent_nmethods(DepChange& changes) {
@@ -2411,43 +2378,6 @@
   oop_oop_iterate(obj, &blk);
 }
 
-#ifndef PRODUCT
-
-void instanceKlass::verify_class_klass_nonstatic_oop_maps(klassOop k) {
-  // This verification code is disabled.  JDK_Version::is_gte_jdk14x_version()
-  // cannot be called since this function is called before the VM is
-  // able to determine what JDK version is running with.
-  // The check below always is false since 1.4.
-  return;
-
-  // This verification code temporarily disabled for the 1.4
-  // reflection implementation since java.lang.Class now has
-  // Java-level instance fields. Should rewrite this to handle this
-  // case.
-  if (!(JDK_Version::is_gte_jdk14x_version() && UseNewReflection)) {
-    // Verify that java.lang.Class instances have a fake oop field added.
-    instanceKlass* ik = instanceKlass::cast(k);
-
-    // Check that we have the right class
-    static bool first_time = true;
-    guarantee(k == SystemDictionary::Class_klass() && first_time, "Invalid verify of maps");
-    first_time = false;
-    const int extra = java_lang_Class::number_of_fake_oop_fields;
-    guarantee(ik->nonstatic_field_size() == extra, "just checking");
-    guarantee(ik->nonstatic_oop_map_count() == 1, "just checking");
-    guarantee(ik->size_helper() == align_object_size(instanceOopDesc::header_size() + extra), "just checking");
-
-    // Check that the map is (2,extra)
-    int offset = java_lang_Class::klass_offset;
-
-    OopMapBlock* map = ik->start_of_nonstatic_oop_maps();
-    guarantee(map->offset() == offset && map->count() == (unsigned int) extra,
-              "sanity");
-  }
-}
-
-#endif // ndef PRODUCT
-
 // JNIid class for jfieldIDs only
 // Note to reviewers:
 // These JNI functions are just moved over to column 1 and not changed
--- a/src/share/vm/oops/instanceKlass.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/oops/instanceKlass.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -27,6 +27,7 @@
 
 #include "oops/constMethodOop.hpp"
 #include "oops/constantPoolOop.hpp"
+#include "oops/fieldInfo.hpp"
 #include "oops/instanceOop.hpp"
 #include "oops/klassOop.hpp"
 #include "oops/klassVtable.hpp"
@@ -228,6 +229,7 @@
   int             _static_field_size;    // number words used by static fields (oop and non-oop) in this klass
   int             _static_oop_field_count;// number of static oop fields in this klass
   int             _nonstatic_oop_map_size;// size in words of nonstatic oop map blocks
+  int             _java_fields_count;    // The number of declared Java fields
   bool            _is_marked_dependent;  // used for marking during flushing and deoptimization
   bool            _rewritten;            // methods rewritten.
   bool            _has_nonstatic_fields; // for sizing with UseCompressedOops
@@ -307,28 +309,29 @@
   objArrayOop transitive_interfaces() const     { return _transitive_interfaces; }
   void set_transitive_interfaces(objArrayOop a) { oop_store_without_check((oop*) &_transitive_interfaces, (oop) a); }
 
-  // fields
-  // Field info extracted from the class file and stored
-  // as an array of 7 shorts
-  enum FieldOffset {
-    access_flags_offset    = 0,
-    name_index_offset      = 1,
-    signature_index_offset = 2,
-    initval_index_offset   = 3,
-    low_offset             = 4,
-    high_offset            = 5,
-    generic_signature_offset = 6,
-    next_offset            = 7
-  };
+ private:
+  friend class fieldDescriptor;
+  FieldInfo* field(int index) const { return FieldInfo::from_field_array(_fields, index); }
+
+ public:
+  int     field_offset      (int index) const { return field(index)->offset(); }
+  int     field_access_flags(int index) const { return field(index)->access_flags(); }
+  Symbol* field_name        (int index) const { return field(index)->name(constants()); }
+  Symbol* field_signature   (int index) const { return field(index)->signature(constants()); }
+
+  // Number of Java declared fields
+  int java_fields_count() const           { return _java_fields_count; }
+
+  // Number of fields including any injected fields
+  int all_fields_count() const            { return _fields->length() / FieldInfo::field_slots; }
 
   typeArrayOop fields() const              { return _fields; }
-  int offset_from_fields( int index ) const {
-    return build_int_from_shorts( fields()->ushort_at(index + low_offset),
-                                  fields()->ushort_at(index + high_offset) );
+
+  void set_fields(typeArrayOop f, int java_fields_count) {
+    oop_store_without_check((oop*) &_fields, (oop) f);
+    _java_fields_count = java_fields_count;
   }
 
-  void set_fields(typeArrayOop f)          { oop_store_without_check((oop*) &_fields, (oop) f); }
-
   // inner classes
   typeArrayOop inner_classes() const       { return _inner_classes; }
   void set_inner_classes(typeArrayOop f)   { oop_store_without_check((oop*) &_inner_classes, (oop) f); }
@@ -842,10 +845,6 @@
   // Verification
   const char* internal_name() const;
   void oop_verify_on(oop obj, outputStream* st);
-
-#ifndef PRODUCT
-  static void verify_class_klass_nonstatic_oop_maps(klassOop k) PRODUCT_RETURN;
-#endif
 };
 
 inline methodOop instanceKlass::method_at_vtable(int index)  {
@@ -1013,4 +1012,36 @@
   PreviousVersionInfo* next_previous_version();
 };
 
+
+//
+// nmethodBucket is used to record dependent nmethods for
+// deoptimization.  nmethod dependencies are actually <klass, method>
+// pairs but we really only care about the klass part for purposes of
+// finding nmethods which might need to be deoptimized.  Instead of
+// recording the method, a count of how many times a particular nmethod
+// was recorded is kept.  This ensures that any recording errors are
+// noticed since an nmethod should be removed as many times as it is
+// added.
+//
+class nmethodBucket: public CHeapObj {
+  friend class VMStructs;
+ private:
+  nmethod*       _nmethod;
+  int            _count;
+  nmethodBucket* _next;
+
+ public:
+  nmethodBucket(nmethod* nmethod, nmethodBucket* next) {
+    _nmethod = nmethod;
+    _next = next;
+    _count = 1;
+  }
+  int count()                             { return _count; }
+  int increment()                         { _count += 1; return _count; }
+  int decrement()                         { _count -= 1; assert(_count >= 0, "don't underflow"); return _count; }
+  nmethodBucket* next()                   { return _next; }
+  void set_next(nmethodBucket* b)         { _next = b; }
+  nmethod* get_nmethod()                  { return _nmethod; }
+};
+
 #endif // SHARE_VM_OOPS_INSTANCEKLASS_HPP
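// The dependency list hangs off an instanceKlass and is walked head to tail;
// a sketch of the marking pass (deps is a hypothetical head bucket, and
// changes_affect() a hypothetical predicate standing in for DepChange matching):

for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
  nmethod* nm = b->get_nmethod();
  if (nm->is_alive() && changes_affect(nm)) {
    nm->mark_for_deoptimization();
  }
}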
--- a/src/share/vm/oops/instanceKlassKlass.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/oops/instanceKlassKlass.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -386,7 +386,7 @@
     ik->set_local_interfaces(NULL);
     ik->set_transitive_interfaces(NULL);
     ik->init_implementor();
-    ik->set_fields(NULL);
+    ik->set_fields(NULL, 0);
     ik->set_constants(NULL);
     ik->set_class_loader(NULL);
     ik->set_protection_domain(NULL);
--- a/src/share/vm/oops/instanceRefKlass.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/oops/instanceRefKlass.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -45,7 +45,7 @@
 #endif
 
 template <class T>
-static void specialized_oop_follow_contents(instanceRefKlass* ref, oop obj) {
+void specialized_oop_follow_contents(instanceRefKlass* ref, oop obj) {
   T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
   T heap_oop = oopDesc::load_heap_oop(referent_addr);
   debug_only(
@@ -56,9 +56,8 @@
   if (!oopDesc::is_null(heap_oop)) {
     oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
     if (!referent->is_gc_marked() &&
-        MarkSweep::ref_processor()->
-          discover_reference(obj, ref->reference_type())) {
-      // reference already enqueued, referent will be traversed later
+        MarkSweep::ref_processor()->discover_reference(obj, ref->reference_type())) {
+      // reference was discovered, referent will be traversed later
       ref->instanceKlass::oop_follow_contents(obj);
       debug_only(
         if(TraceReferenceGC && PrintGCDetails) {
@@ -76,8 +75,34 @@
       MarkSweep::mark_and_push(referent_addr);
     }
   }
-  // treat next as normal oop.  next is a link in the pending list.
   T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+  if (ReferenceProcessor::pending_list_uses_discovered_field()) {
+    // Treat discovered as normal oop, if ref is not "active",
+    // i.e. if next is non-NULL.
+    T  next_oop = oopDesc::load_heap_oop(next_addr);
+    if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
+      T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
+      debug_only(
+        if(TraceReferenceGC && PrintGCDetails) {
+          gclog_or_tty->print_cr("   Process discovered as normal "
+                                 INTPTR_FORMAT, discovered_addr);
+        }
+      )
+      MarkSweep::mark_and_push(discovered_addr);
+    }
+  } else {
+#ifdef ASSERT
+    // In the case of older JDKs which do not use the discovered
+    // field for the pending list, an inactive ref (next != NULL)
+    // must always have a NULL discovered field.
+    oop next = oopDesc::load_decode_heap_oop(next_addr);
+    oop discovered = java_lang_ref_Reference::discovered(obj);
+    assert(oopDesc::is_null(next) || oopDesc::is_null(discovered),
+           err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL discovered field",
+                   (oopDesc*)obj));
+#endif
+  }
+  // treat next as normal oop.  next is a link in the reference queue.
   debug_only(
     if(TraceReferenceGC && PrintGCDetails) {
       gclog_or_tty->print_cr("   Process next as normal " INTPTR_FORMAT, next_addr);
@@ -130,13 +155,33 @@
       PSParallelCompact::mark_and_push(cm, referent_addr);
     }
   }
-  // treat next as normal oop.  next is a link in the pending list.
   T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
-  debug_only(
-    if(TraceReferenceGC && PrintGCDetails) {
-      gclog_or_tty->print_cr("   Process next as normal " INTPTR_FORMAT, next_addr);
+  if (ReferenceProcessor::pending_list_uses_discovered_field()) {
+    // Treat discovered as normal oop, if ref is not "active",
+    // i.e. if next is non-NULL.
+    T  next_oop = oopDesc::load_heap_oop(next_addr);
+    if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
+      T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
+      debug_only(
+        if(TraceReferenceGC && PrintGCDetails) {
+          gclog_or_tty->print_cr("   Process discovered as normal "
+                                 INTPTR_FORMAT, discovered_addr);
+        }
+      )
+      PSParallelCompact::mark_and_push(cm, discovered_addr);
     }
-  )
+  } else {
+#ifdef ASSERT
+    // In the case of older JDKs which do not use the discovered
+    // field for the pending list, an inactive ref (next != NULL)
+    // must always have a NULL discovered field.
+    T next = oopDesc::load_heap_oop(next_addr);
+    oop discovered = java_lang_ref_Reference::discovered(obj);
+    assert(oopDesc::is_null(next) || oopDesc::is_null(discovered),
+           err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL discovered field",
+                   (oopDesc*)obj));
+#endif
+  }
   PSParallelCompact::mark_and_push(cm, next_addr);
   ref->instanceKlass::oop_follow_contents(cm, obj);
 }
@@ -197,27 +242,53 @@
 }
 
 #define InstanceRefKlass_SPECIALIZED_OOP_ITERATE(T, nv_suffix, contains)        \
+  T* disc_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);             \
   if (closure->apply_to_weak_ref_discovered_field()) {                          \
-    T* disc_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);           \
     closure->do_oop##nv_suffix(disc_addr);                                      \
   }                                                                             \
                                                                                 \
   T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);           \
   T heap_oop = oopDesc::load_heap_oop(referent_addr);                           \
-  if (!oopDesc::is_null(heap_oop) && contains(referent_addr)) {                 \
-    ReferenceProcessor* rp = closure->_ref_processor;                           \
+  ReferenceProcessor* rp = closure->_ref_processor;                             \
+  if (!oopDesc::is_null(heap_oop)) {                                            \
     oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);                 \
     if (!referent->is_gc_marked() && (rp != NULL) &&                            \
         rp->discover_reference(obj, reference_type())) {                        \
       return size;                                                              \
-    } else {                                                                    \
+    } else if (contains(referent_addr)) {                                       \
       /* treat referent as normal oop */                                        \
       SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk);\
       closure->do_oop##nv_suffix(referent_addr);                                \
     }                                                                           \
   }                                                                             \
+  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);                   \
+  if (ReferenceProcessor::pending_list_uses_discovered_field()) {               \
+    T next_oop  = oopDesc::load_heap_oop(next_addr);                            \
+    /* Treat discovered as normal oop, if ref is not "active" (next non-NULL) */\
+    if (!oopDesc::is_null(next_oop) && contains(disc_addr)) {                   \
+        /* i.e. ref is not "active" */                                          \
+      debug_only(                                                               \
+        if(TraceReferenceGC && PrintGCDetails) {                                \
+          gclog_or_tty->print_cr("   Process discovered as normal "             \
+                                 INTPTR_FORMAT, disc_addr);                     \
+        }                                                                       \
+      )                                                                         \
+      SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk);\
+      closure->do_oop##nv_suffix(disc_addr);                                    \
+    }                                                                           \
+  } else {                                                                      \
+    /* In the case of older JDKs which do not use the discovered field for  */  \
+    /* the pending list, an inactive ref (next != NULL) must always have a  */  \
+    /* NULL discovered field. */                                                \
+    debug_only(                                                                 \
+      T next_oop = oopDesc::load_heap_oop(next_addr);                           \
+      T disc_oop = oopDesc::load_heap_oop(disc_addr);                           \
+      assert(oopDesc::is_null(next_oop) || oopDesc::is_null(disc_oop),          \
+           err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL" \
+                   "discovered field", (oopDesc*)obj));                                   \
+    )                                                                           \
+  }                                                                             \
   /* treat next as normal oop */                                                \
-  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);                   \
   if (contains(next_addr)) {                                                    \
     SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk); \
     closure->do_oop##nv_suffix(next_addr);                                      \
@@ -306,8 +377,37 @@
       pm->claim_or_forward_depth(referent_addr);
     }
   }
-  // treat next as normal oop
+  // Treat discovered as normal oop, if ref is not "active",
+  // i.e. if next is non-NULL.
   T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+  if (ReferenceProcessor::pending_list_uses_discovered_field()) {
+    T  next_oop = oopDesc::load_heap_oop(next_addr);
+    if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
+      T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
+      debug_only(
+        if(TraceReferenceGC && PrintGCDetails) {
+          gclog_or_tty->print_cr("   Process discovered as normal "
+                                 INTPTR_FORMAT, discovered_addr);
+        }
+      )
+      if (PSScavenge::should_scavenge(discovered_addr)) {
+        pm->claim_or_forward_depth(discovered_addr);
+      }
+    }
+  } else {
+#ifdef ASSERT
+    // In the case of older JDKs which do not use the discovered
+    // field for the pending list, an inactive ref (next != NULL)
+    // must always have a NULL discovered field.
+    oop next = oopDesc::load_decode_heap_oop(next_addr);
+    oop discovered = java_lang_ref_Reference::discovered(obj);
+    assert(oopDesc::is_null(next) || oopDesc::is_null(discovered),
+           err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL discovered field",
+                   (oopDesc*)obj));
+#endif
+  }
+
+  // Treat next as normal oop;  next is a link in the reference queue.
   if (PSScavenge::should_scavenge(next_addr)) {
     pm->claim_or_forward_depth(next_addr);
   }
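
All of the specializations changed above follow the same rule: a Reference is "active" exactly when its next field is NULL, and only an inactive Reference may have its discovered field followed as an ordinary oop (on JDKs whose pending list is linked through discovered); on older JDKs an inactive Reference must have a NULL discovered field, which the new asserts verify. A compact sketch of that rule, with a hypothetical visitor standing in for the GC closures:

  #include <cstddef>

  struct Obj;                        // stand-in for oop
  typedef void (*Visitor)(Obj**);    // stand-in for mark_and_push / do_oop

  // Hypothetical java.lang.ref.Reference layout: three oop fields.
  struct Reference {
    Obj* referent;
    Obj* next;                       // NULL  <=>  reference is "active"
    Obj* discovered;
  };

  // uses_discovered mirrors ReferenceProcessor::pending_list_uses_discovered_field().
  void follow_reference_fields(Reference* ref, Visitor visit, bool uses_discovered) {
    if (uses_discovered) {
      // Only an inactive reference (next != NULL) can be on the pending
      // list, so only then is discovered treated as a normal oop.
      if (ref->next != NULL) {
        visit(&ref->discovered);
      }
    }
    // On older JDKs discovered must already be NULL when next != NULL
    // (asserted in debug builds), so there is nothing extra to visit.
    // next itself is always treated as a normal oop.
    visit(&ref->next);
  }
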
--- a/src/share/vm/oops/klassOop.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/oops/klassOop.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -53,8 +53,10 @@
  private:
   // These have no implementation since klassOop should never be accessed in this fashion
   oop obj_field(int offset) const;
+  volatile oop obj_field_volatile(int offset) const;
   void obj_field_put(int offset, oop value);
-  void obj_field_raw_put(int offset, oop value);
+  void obj_field_put_raw(int offset, oop value);
+  void obj_field_put_volatile(int offset, oop value);
 
   jbyte byte_field(int offset) const;
   void byte_field_put(int offset, jbyte contents);
--- a/src/share/vm/oops/methodDataOop.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/oops/methodDataOop.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -600,6 +600,11 @@
   uint taken() {
     return uint_at(taken_off_set);
   }
+
+  void set_taken(uint cnt) {
+    set_uint_at(taken_off_set, cnt);
+  }
+
   // Saturating counter
   uint inc_taken() {
     uint cnt = taken() + 1;
@@ -926,6 +931,10 @@
     return uint_at(not_taken_off_set);
   }
 
+  void set_not_taken(uint cnt) {
+    set_uint_at(not_taken_off_set, cnt);
+  }
+
   uint inc_not_taken() {
     uint cnt = not_taken() + 1;
     // Did we wrap? Will compiler screw us??
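
The new set_taken()/set_not_taken() store a raw count, while the existing inc_taken()/inc_not_taken() saturate so a counter that wraps never makes a hot branch look cold again. A one-function sketch of that saturating increment (assumed 32-bit counter, not the ProfileData API):

  #include <stdint.h>

  // Saturating increment in the style of BranchData::inc_taken(): if the
  // count wraps around to 0, keep the previous (maximal) value instead.
  uint32_t inc_saturating(uint32_t cnt) {
    uint32_t next = cnt + 1;
    return (next != 0) ? next : cnt;
  }
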
--- a/src/share/vm/oops/methodOop.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/oops/methodOop.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -49,6 +49,7 @@
 #include "runtime/relocator.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
+#include "utilities/quickSort.hpp"
 #include "utilities/xmlstream.hpp"
 
 
@@ -913,6 +914,7 @@
                                                Symbol* name,
                                                Symbol* signature,
                                                Handle method_type, TRAPS) {
+  ResourceMark rm;
   methodHandle empty;
 
   assert(holder() == SystemDictionary::MethodHandle_klass(),
@@ -1241,41 +1243,6 @@
   if (WizardMode) signature()->print_symbol_on(st);
 }
 
-
-extern "C" {
-  static int method_compare(methodOop* a, methodOop* b) {
-    return (*a)->name()->fast_compare((*b)->name());
-  }
-
-  // Prevent qsort from reordering a previous valid sort by
-  // considering the address of the methodOops if two methods
-  // would otherwise compare as equal.  Required to preserve
-  // optimal access order in the shared archive.  Slower than
-  // method_compare, only used for shared archive creation.
-  static int method_compare_idempotent(methodOop* a, methodOop* b) {
-    int i = method_compare(a, b);
-    if (i != 0) return i;
-    return ( a < b ? -1 : (a == b ? 0 : 1));
-  }
-
-  // We implement special compare versions for narrow oops to avoid
-  // testing for UseCompressedOops on every comparison.
-  static int method_compare_narrow(narrowOop* a, narrowOop* b) {
-    methodOop m = (methodOop)oopDesc::load_decode_heap_oop(a);
-    methodOop n = (methodOop)oopDesc::load_decode_heap_oop(b);
-    return m->name()->fast_compare(n->name());
-  }
-
-  static int method_compare_narrow_idempotent(narrowOop* a, narrowOop* b) {
-    int i = method_compare_narrow(a, b);
-    if (i != 0) return i;
-    return ( a < b ? -1 : (a == b ? 0 : 1));
-  }
-
-  typedef int (*compareFn)(const void*, const void*);
-}
-
-
 // This is only done during class loading, so it is OK to assume method_idnum matches the methods() array
 static void reorder_based_on_method_index(objArrayOop methods,
                                           objArrayOop annotations,
@@ -1299,6 +1266,21 @@
   }
 }
 
+// Comparator for sorting an object array containing
+// methodOops.
+// Non-template method_comparator functions are used because the
+// Visual Studio 2003 compiler generates incorrect optimized code
+// for the template version.
+static int method_comparator_narrowOop(narrowOop a, narrowOop b) {
+  methodOop m = (methodOop)oopDesc::decode_heap_oop_not_null(a);
+  methodOop n = (methodOop)oopDesc::decode_heap_oop_not_null(b);
+  return m->name()->fast_compare(n->name());
+}
+static int method_comparator_oop(oop a, oop b) {
+  methodOop m = (methodOop)a;
+  methodOop n = (methodOop)b;
+  return m->name()->fast_compare(n->name());
+}
 
 // This is only done during class loading, so it is OK to assume method_idnum matches the methods() array
 void methodOopDesc::sort_methods(objArrayOop methods,
@@ -1321,30 +1303,19 @@
         m->set_method_idnum(i);
       }
     }
-
-    // Use a simple bubble sort for small number of methods since
-    // qsort requires a functional pointer call for each comparison.
-    if (length < 8) {
-      bool sorted = true;
-      for (int i=length-1; i>0; i--) {
-        for (int j=0; j<i; j++) {
-          methodOop m1 = (methodOop)methods->obj_at(j);
-          methodOop m2 = (methodOop)methods->obj_at(j+1);
-          if ((uintptr_t)m1->name() > (uintptr_t)m2->name()) {
-            methods->obj_at_put(j, m2);
-            methods->obj_at_put(j+1, m1);
-            sorted = false;
-          }
-        }
-        if (sorted) break;
-          sorted = true;
+    {
+      No_Safepoint_Verifier nsv;
+      if (UseCompressedOops) {
+        QuickSort::sort<narrowOop>((narrowOop*)(methods->base()), length, method_comparator_narrowOop, idempotent);
+      } else {
+        QuickSort::sort<oop>((oop*)(methods->base()), length, method_comparator_oop, idempotent);
       }
-    } else {
-      compareFn compare =
-        (UseCompressedOops ?
-         (compareFn) (idempotent ? method_compare_narrow_idempotent : method_compare_narrow):
-         (compareFn) (idempotent ? method_compare_idempotent : method_compare));
-      qsort(methods->base(), length, heapOopSize, compare);
+      if (UseConcMarkSweepGC) {
+        // For CMS we need to dirty the cards for the array
+        BarrierSet* bs = Universe::heap()->barrier_set();
+        assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
+        bs->write_ref_array(methods->base(), length);
+      }
     }
 
     // Sort annotations if necessary
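
The rewrite drops the extern "C" qsort comparators and the small-array bubble sort in favor of QuickSort::sort with the two method_comparator functions, and for CMS dirties the cards covering the array afterwards, since the in-place sort stores oops without the usual write barrier. The tie-breaking idea that the removed method_compare_idempotent provided (stable results across re-sorts, needed for shared-archive creation) can be sketched with standard C++; Method, by_name_idempotent and the sample names are illustrative only:

  #include <algorithm>
  #include <cstring>
  #include <functional>
  #include <vector>

  struct Method { const char* name; };   // hypothetical stand-in for methodOop

  // Order by name; break ties by address so re-sorting an already sorted
  // array cannot reorder entries with equal names.
  bool by_name_idempotent(Method* a, Method* b) {
    int c = std::strcmp(a->name, b->name);
    if (c != 0) return c < 0;
    return std::less<Method*>()(a, b);
  }

  int main() {
    Method run1 = {"run"}, init = {"init"}, run2 = {"run"};
    std::vector<Method*> methods;
    methods.push_back(&run1); methods.push_back(&init); methods.push_back(&run2);
    std::sort(methods.begin(), methods.end(), by_name_idempotent);
    return 0;
  }
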
--- a/src/share/vm/oops/oop.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/oops/oop.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -214,8 +214,10 @@
 
  // Access to fields in an instanceOop through these methods.
   oop obj_field(int offset) const;
+  volatile oop obj_field_volatile(int offset) const;
   void obj_field_put(int offset, oop value);
-  void obj_field_raw_put(int offset, oop value);
+  void obj_field_put_raw(int offset, oop value);
+  void obj_field_put_volatile(int offset, oop value);
 
   jbyte byte_field(int offset) const;
   void byte_field_put(int offset, jbyte contents);
--- a/src/share/vm/oops/oop.inline.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/oops/oop.inline.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -321,15 +321,25 @@
     load_decode_heap_oop(obj_field_addr<narrowOop>(offset)) :
     load_decode_heap_oop(obj_field_addr<oop>(offset));
 }
+inline volatile oop oopDesc::obj_field_volatile(int offset) const {
+  volatile oop value = obj_field(offset);
+  OrderAccess::acquire();
+  return value;
+}
 inline void oopDesc::obj_field_put(int offset, oop value) {
   UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
                       oop_store(obj_field_addr<oop>(offset),       value);
 }
-inline void oopDesc::obj_field_raw_put(int offset, oop value) {
+inline void oopDesc::obj_field_put_raw(int offset, oop value) {
   UseCompressedOops ?
     encode_store_heap_oop(obj_field_addr<narrowOop>(offset), value) :
     encode_store_heap_oop(obj_field_addr<oop>(offset),       value);
 }
+inline void oopDesc::obj_field_put_volatile(int offset, oop value) {
+  OrderAccess::release();
+  obj_field_put(offset, value);
+  OrderAccess::fence();
+}
 
 inline jbyte oopDesc::byte_field(int offset) const                  { return (jbyte) *byte_field_addr(offset);    }
 inline void oopDesc::byte_field_put(int offset, jbyte contents)     { *byte_field_addr(offset) = (jint) contents; }
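
The renamed and added accessors pair a volatile oop load with an acquire and bracket a volatile oop store with a release before and a full fence after. As an analogy only (std::atomic rather than HotSpot's OrderAccess, and ignoring compressed oops), the same ordering looks like:

  #include <atomic>

  std::atomic<void*> field;   // stands in for one oop field of an object

  // Load with acquire semantics, as in obj_field_volatile().
  void* read_volatile_field() {
    return field.load(std::memory_order_acquire);
  }

  // Store with release semantics plus a trailing full fence, as in
  // obj_field_put_volatile().
  void write_volatile_field(void* v) {
    field.store(v, std::memory_order_release);
    std::atomic_thread_fence(std::memory_order_seq_cst);
  }
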
--- a/src/share/vm/oops/typeArrayKlass.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/oops/typeArrayKlass.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -84,11 +84,7 @@
       KlassHandle h_k(THREAD, as_klassOop());
       typeArrayOop t;
       CollectedHeap* ch = Universe::heap();
-      if (size < ch->large_typearray_limit()) {
-        t = (typeArrayOop)CollectedHeap::array_allocate(h_k, (int)size, length, CHECK_NULL);
-      } else {
-        t = (typeArrayOop)CollectedHeap::large_typearray_allocate(h_k, (int)size, length, CHECK_NULL);
-      }
+      t = (typeArrayOop)CollectedHeap::array_allocate(h_k, (int)size, length, CHECK_NULL);
       assert(t->is_parsable(), "Don't publish unless parsable");
       return t;
     } else {
--- a/src/share/vm/opto/block.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/block.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -80,35 +80,37 @@
 
 uint Block::code_alignment() {
   // Check for Root block
-  if( _pre_order == 0 ) return CodeEntryAlignment;
+  if (_pre_order == 0) return CodeEntryAlignment;
   // Check for Start block
-  if( _pre_order == 1 ) return InteriorEntryAlignment;
+  if (_pre_order == 1) return InteriorEntryAlignment;
   // Check for loop alignment
-  if (has_loop_alignment())  return loop_alignment();
+  if (has_loop_alignment()) return loop_alignment();
 
-  return 1;                     // no particular alignment
+  return relocInfo::addr_unit(); // no particular alignment
 }
 
 uint Block::compute_loop_alignment() {
   Node *h = head();
-  if( h->is_Loop() && h->as_Loop()->is_inner_loop() )  {
+  int unit_sz = relocInfo::addr_unit();
+  if (h->is_Loop() && h->as_Loop()->is_inner_loop())  {
     // Pre- and post-loops have low trip count so do not bother with
     // NOPs for align loop head.  The constants are hidden from tuning
     // but only because my "divide by 4" heuristic surely gets nearly
     // all possible gain (a "do not align at all" heuristic has a
     // chance of getting a really tiny gain).
-    if( h->is_CountedLoop() && (h->as_CountedLoop()->is_pre_loop() ||
-                                h->as_CountedLoop()->is_post_loop()) )
-      return (OptoLoopAlignment > 4) ? (OptoLoopAlignment>>2) : 1;
+    if (h->is_CountedLoop() && (h->as_CountedLoop()->is_pre_loop() ||
+                                h->as_CountedLoop()->is_post_loop())) {
+      return (OptoLoopAlignment > 4*unit_sz) ? (OptoLoopAlignment>>2) : unit_sz;
+    }
     // Loops with low backedge frequency should not be aligned.
     Node *n = h->in(LoopNode::LoopBackControl)->in(0);
-    if( n->is_MachIf() && n->as_MachIf()->_prob < 0.01 ) {
-      return 1;             // Loop does not loop, more often than not!
+    if (n->is_MachIf() && n->as_MachIf()->_prob < 0.01) {
+      return unit_sz; // Loop does not loop, more often than not!
     }
     return OptoLoopAlignment; // Otherwise align loop head
   }
 
-  return 1;                     // no particular alignment
+  return unit_sz; // no particular alignment
 }
 
 //-----------------------------------------------------------------------------
@@ -165,7 +167,7 @@
   int end_idx = _nodes.size()-1;
 
   // Check for ending goto
-  if ((end_idx > 0) && (_nodes[end_idx]->is_Goto())) {
+  if ((end_idx > 0) && (_nodes[end_idx]->is_MachGoto())) {
     success_result = empty_with_goto;
     end_idx--;
   }
@@ -197,11 +199,11 @@
 bool Block::has_uncommon_code() const {
   Node* en = end();
 
-  if (en->is_Goto())
+  if (en->is_MachGoto())
     en = en->in(0);
   if (en->is_Catch())
     en = en->in(0);
-  if (en->is_Proj() && en->in(0)->is_MachCall()) {
+  if (en->is_MachProj() && en->in(0)->is_MachCall()) {
     MachCallNode* call = en->in(0)->as_MachCall();
     if (call->cnt() != COUNT_UNKNOWN && call->cnt() <= PROB_UNLIKELY_MAG(4)) {
       // This is true for slow-path stubs like new_{instance,array},
@@ -271,55 +273,55 @@
 
 //------------------------------dump-------------------------------------------
 #ifndef PRODUCT
-void Block::dump_bidx(const Block* orig) const {
-  if (_pre_order) tty->print("B%d",_pre_order);
-  else tty->print("N%d", head()->_idx);
+void Block::dump_bidx(const Block* orig, outputStream* st) const {
+  if (_pre_order) st->print("B%d",_pre_order);
+  else st->print("N%d", head()->_idx);
 
   if (Verbose && orig != this) {
     // Dump the original block's idx
-    tty->print(" (");
-    orig->dump_bidx(orig);
-    tty->print(")");
+    st->print(" (");
+    orig->dump_bidx(orig, st);
+    st->print(")");
   }
 }
 
-void Block::dump_pred(const Block_Array *bbs, Block* orig) const {
+void Block::dump_pred(const Block_Array *bbs, Block* orig, outputStream* st) const {
   if (is_connector()) {
     for (uint i=1; i<num_preds(); i++) {
       Block *p = ((*bbs)[pred(i)->_idx]);
-      p->dump_pred(bbs, orig);
+      p->dump_pred(bbs, orig, st);
     }
   } else {
-    dump_bidx(orig);
-    tty->print(" ");
+    dump_bidx(orig, st);
+    st->print(" ");
   }
 }
 
-void Block::dump_head( const Block_Array *bbs ) const {
+void Block::dump_head( const Block_Array *bbs, outputStream* st ) const {
   // Print the basic block
-  dump_bidx(this);
-  tty->print(": #\t");
+  dump_bidx(this, st);
+  st->print(": #\t");
 
   // Print the incoming CFG edges and the outgoing CFG edges
   for( uint i=0; i<_num_succs; i++ ) {
-    non_connector_successor(i)->dump_bidx(_succs[i]);
-    tty->print(" ");
+    non_connector_successor(i)->dump_bidx(_succs[i], st);
+    st->print(" ");
   }
-  tty->print("<- ");
+  st->print("<- ");
   if( head()->is_block_start() ) {
     for (uint i=1; i<num_preds(); i++) {
       Node *s = pred(i);
       if (bbs) {
         Block *p = (*bbs)[s->_idx];
-        p->dump_pred(bbs, p);
+        p->dump_pred(bbs, p, st);
       } else {
         while (!s->is_block_start())
           s = s->in(0);
-        tty->print("N%d ", s->_idx );
+        st->print("N%d ", s->_idx );
       }
     }
   } else
-    tty->print("BLOCK HEAD IS JUNK  ");
+    st->print("BLOCK HEAD IS JUNK  ");
 
   // Print loop, if any
   const Block *bhead = this;    // Head of self-loop
@@ -330,24 +332,24 @@
     while (bx->is_connector()) {
       bx = (*bbs)[bx->pred(1)->_idx];
     }
-    tty->print("\tLoop: B%d-B%d ", bhead->_pre_order, bx->_pre_order);
+    st->print("\tLoop: B%d-B%d ", bhead->_pre_order, bx->_pre_order);
     // Dump any loop-specific bits, especially for CountedLoops.
-    loop->dump_spec(tty);
+    loop->dump_spec(st);
   } else if (has_loop_alignment()) {
-    tty->print(" top-of-loop");
+    st->print(" top-of-loop");
   }
-  tty->print(" Freq: %g",_freq);
+  st->print(" Freq: %g",_freq);
   if( Verbose || WizardMode ) {
-    tty->print(" IDom: %d/#%d", _idom ? _idom->_pre_order : 0, _dom_depth);
-    tty->print(" RegPressure: %d",_reg_pressure);
-    tty->print(" IHRP Index: %d",_ihrp_index);
-    tty->print(" FRegPressure: %d",_freg_pressure);
-    tty->print(" FHRP Index: %d",_fhrp_index);
+    st->print(" IDom: %d/#%d", _idom ? _idom->_pre_order : 0, _dom_depth);
+    st->print(" RegPressure: %d",_reg_pressure);
+    st->print(" IHRP Index: %d",_ihrp_index);
+    st->print(" FRegPressure: %d",_freg_pressure);
+    st->print(" FHRP Index: %d",_fhrp_index);
   }
-  tty->print_cr("");
+  st->print_cr("");
 }
 
-void Block::dump() const { dump(0); }
+void Block::dump() const { dump(NULL); }
 
 void Block::dump( const Block_Array *bbs ) const {
   dump_head(bbs);
@@ -441,9 +443,9 @@
       Block *bb = new (_bbs._arena) Block(_bbs._arena,p);
       _bbs.map(p->_idx,bb);
       _bbs.map(x->_idx,bb);
-      if( x != p )                  // Only for root is x == p
+      if( x != p ) {                // Only for root is x == p
         bb->_nodes.push((Node*)x);
-
+      }
       // Now handle predecessors
       ++sum;                        // Count 1 for self block
       uint cnt = bb->num_preds();
@@ -837,7 +839,7 @@
 
       // Make sure we TRUE branch to the target
       if( proj0->Opcode() == Op_IfFalse ) {
-        iff->negate();
+        iff->as_MachIf()->negate();
       }
 
       b->_nodes.pop();          // Remove IfFalse & IfTrue projections
@@ -945,8 +947,8 @@
     assert( bp, "last instruction must be a block proj" );
     assert( bp == b->_nodes[j], "wrong number of successors for this block" );
     if( bp->is_Catch() ) {
-      while( b->_nodes[--j]->Opcode() == Op_MachProj ) ;
-      assert( b->_nodes[j]->is_Call(), "CatchProj must follow call" );
+      while( b->_nodes[--j]->is_MachProj() ) ;
+      assert( b->_nodes[j]->is_MachCall(), "CatchProj must follow call" );
     }
     else if( bp->is_Mach() && bp->as_Mach()->ideal_Opcode() == Op_If ) {
       assert( b->_num_succs == 2, "Conditional branch must have two targets");
@@ -1105,7 +1107,7 @@
 
 //------------------------------trace_frequency_order--------------------------
 // Comparison function for edges
-static int trace_frequency_order(const void *p0, const void *p1) {
+extern "C" int trace_frequency_order(const void *p0, const void *p1) {
   Trace *tr0 = *(Trace **) p0;
   Trace *tr1 = *(Trace **) p1;
   Block *b0 = tr0->first_block();
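
code_alignment() and compute_loop_alignment() now report alignment in relocInfo::addr_unit() rather than a literal 1, so "no particular alignment" means one address unit on targets whose code is not byte-addressed. The heuristic itself is unchanged and can be written as a pure function (parameter names are assumptions, not the Block API):

  // Sketch of the loop-alignment heuristic with assumed inputs.
  unsigned loop_alignment(bool is_pre_or_post_loop, bool backedge_rarely_taken,
                          unsigned opto_loop_alignment, unsigned addr_unit) {
    if (is_pre_or_post_loop) {
      // Low trip count: cap the alignment at OptoLoopAlignment / 4.
      return (opto_loop_alignment > 4 * addr_unit) ? (opto_loop_alignment >> 2)
                                                   : addr_unit;
    }
    if (backedge_rarely_taken) {
      return addr_unit;            // loop does not loop, more often than not
    }
    return opto_loop_alignment;    // otherwise align the loop head
  }
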
--- a/src/share/vm/opto/block.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/block.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,6 +45,7 @@
 // Note that the constructor just zeros things, and since I use Arena
 // allocation I do not need a destructor to reclaim storage.
 class Block_Array : public ResourceObj {
+  friend class VMStructs;
   uint _size;                   // allocated size, as opposed to formal limit
   debug_only(uint _limit;)      // limit to formal domain
 protected:
@@ -72,6 +73,7 @@
 
 
 class Block_List : public Block_Array {
+  friend class VMStructs;
 public:
   uint _cnt;
   Block_List() : Block_Array(Thread::current()->resource_area()), _cnt(0) {}
@@ -87,6 +89,7 @@
 
 
 class CFGElement : public ResourceObj {
+  friend class VMStructs;
  public:
   float _freq; // Execution frequency (estimate)
 
@@ -102,6 +105,7 @@
 // Basic blocks are used during the output routines, and are not used during
 // any optimization pass.  They are created late in the game.
 class Block : public CFGElement {
+  friend class VMStructs;
  public:
   // Nodes in this block, in order
   Node_List _nodes;
@@ -329,10 +333,10 @@
 
 #ifndef PRODUCT
   // Debugging print of basic block
-  void dump_bidx(const Block* orig) const;
-  void dump_pred(const Block_Array *bbs, Block* orig) const;
-  void dump_head( const Block_Array *bbs ) const;
-  void dump( ) const;
+  void dump_bidx(const Block* orig, outputStream* st = tty) const;
+  void dump_pred(const Block_Array *bbs, Block* orig, outputStream* st = tty) const;
+  void dump_head( const Block_Array *bbs, outputStream* st = tty ) const;
+  void dump() const;
   void dump( const Block_Array *bbs ) const;
 #endif
 };
@@ -341,6 +345,7 @@
 //------------------------------PhaseCFG---------------------------------------
 // Build an array of Basic Block pointers, one per Node.
 class PhaseCFG : public Phase {
+  friend class VMStructs;
  private:
   // Build a proper looking cfg.  Return count of basic blocks
   uint build_cfg();
@@ -515,6 +520,7 @@
 
 //------------------------------CFGLoop-------------------------------------------
 class CFGLoop : public CFGElement {
+  friend class VMStructs;
   int _id;
   int _depth;
   CFGLoop *_parent;      // root of loop tree is the method level "pseudo" loop, it's parent is null
@@ -566,6 +572,7 @@
 // A edge between two basic blocks that will be embodied by a branch or a
 // fall-through.
 class CFGEdge : public ResourceObj {
+  friend class VMStructs;
  private:
   Block * _from;        // Source basic block
   Block * _to;          // Destination basic block
@@ -702,6 +709,7 @@
 //------------------------------PhaseBlockLayout-------------------------------
 // Rearrange blocks into some canonical order, based on edges and their frequencies
 class PhaseBlockLayout : public Phase {
+  friend class VMStructs;
   PhaseCFG &_cfg;               // Control flow graph
 
   GrowableArray<CFGEdge *> *edges;
--- a/src/share/vm/opto/bytecodeInfo.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/bytecodeInfo.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -45,7 +45,7 @@
   _method(callee),
   _site_invoke_ratio(site_invoke_ratio),
   _max_inline_level(max_inline_level),
-  _count_inline_bcs(method()->code_size())
+  _count_inline_bcs(method()->code_size_for_inlining())
 {
   NOT_PRODUCT(_count_inlines = 0;)
   if (_caller_jvms != NULL) {
@@ -107,7 +107,7 @@
 
   // positive filter: should send be inlined?  returns NULL (--> yes)
   // or rejection msg
-  int size = callee_method->code_size();
+  int size = callee_method->code_size_for_inlining();
 
   // Check for too many throws (and not too huge)
   if(callee_method->interpreter_throwout_count() > InlineThrowCount &&
@@ -141,7 +141,21 @@
     assert(mha_profile, "must exist");
     CounterData* cd = mha_profile->as_CounterData();
     invoke_count = cd->count();
-    call_site_count = invoke_count;  // use the same value
+    if (invoke_count == 0) {
+      return "method handle not reached";
+    }
+
+    if (_caller_jvms != NULL && _caller_jvms->method() != NULL &&
+        _caller_jvms->method()->method_data() != NULL &&
+        !_caller_jvms->method()->method_data()->is_empty()) {
+      ciMethodData* mdo = _caller_jvms->method()->method_data();
+      ciProfileData* mha_profile = mdo->bci_to_data(_caller_jvms->bci());
+      assert(mha_profile, "must exist");
+      CounterData* cd = mha_profile->as_CounterData();
+      call_site_count = cd->count();
+    } else {
+      call_site_count = invoke_count;  // use the same value
+    }
   }
 
   assert(invoke_count != 0, "require invocation count greater than zero");
@@ -244,7 +258,7 @@
   }
 
   // use frequency-based objections only for non-trivial methods
-  if (callee_method->code_size() <= MaxTrivialSize) return NULL;
+  if (callee_method->code_size_for_inlining() <= MaxTrivialSize) return NULL;
 
   // don't use counts with -Xcomp or CTW
   if (UseInterpreter && !CompileTheWorld) {
@@ -305,7 +319,7 @@
   }
 
   // suppress a few checks for accessors and trivial methods
-  if (callee_method->code_size() > MaxTrivialSize) {
+  if (callee_method->code_size_for_inlining() > MaxTrivialSize) {
 
     // don't inline into giant methods
     if (C->unique() > (uint)NodeCountInliningCutoff) {
@@ -349,7 +363,7 @@
     }
   }
 
-  int size = callee_method->code_size();
+  int size = callee_method->code_size_for_inlining();
 
   if (UseOldInlining && ClipInlining
       && (int)count_inline_bcs() + size >= DesiredMethodLimit) {
@@ -394,6 +408,16 @@
   return true;
 }
 
+//------------------------------check_can_parse--------------------------------
+const char* InlineTree::check_can_parse(ciMethod* callee) {
+  // Certain methods cannot be parsed at all:
+  if ( callee->is_native())                     return "native method";
+  if (!callee->can_be_compiled())               return "not compilable (disabled)";
+  if (!callee->has_balanced_monitors())         return "not compilable (unbalanced monitors)";
+  if ( callee->get_flow_analysis()->failing())  return "not compilable (flow analysis failed)";
+  return NULL;
+}
+
 //------------------------------print_inlining---------------------------------
 // Really, the failure_msg can be a success message also.
 void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci, const char* failure_msg) const {
@@ -423,14 +447,22 @@
   int         caller_bci    = jvms->bci();
   ciMethod   *caller_method = jvms->method();
 
-  if( !pass_initial_checks(caller_method, caller_bci, callee_method)) {
-    if( PrintInlining ) {
+  // Do some initial checks.
+  if (!pass_initial_checks(caller_method, caller_bci, callee_method)) {
+    if (PrintInlining) {
       failure_msg = "failed_initial_checks";
-      print_inlining( callee_method, caller_bci, failure_msg);
+      print_inlining(callee_method, caller_bci, failure_msg);
     }
     return NULL;
   }
 
+  // Do some parse checks.
+  failure_msg = check_can_parse(callee_method);
+  if (failure_msg != NULL) {
+    if (PrintInlining)  print_inlining(callee_method, caller_bci, failure_msg);
+    return NULL;
+  }
+
   // Check if inlining policy says no.
   WarmCallInfo wci = *(initial_wci);
   failure_msg = try_to_inline(callee_method, caller_method, caller_bci, profile, &wci);
@@ -471,7 +503,7 @@
     if (failure_msg == NULL)  failure_msg = "inline (hot)";
 
     // Inline!
-    if( PrintInlining ) print_inlining( callee_method, caller_bci, failure_msg);
+    if (PrintInlining)  print_inlining(callee_method, caller_bci, failure_msg);
     if (UseOldInlining)
       build_inline_tree_for_callee(callee_method, jvms, caller_bci);
     if (InlineWarmCalls && !wci.is_hot())
@@ -481,7 +513,7 @@
 
   // Do not inline
   if (failure_msg == NULL)  failure_msg = "too cold to inline";
-  if( PrintInlining ) print_inlining( callee_method, caller_bci, failure_msg);
+  if (PrintInlining)  print_inlining(callee_method, caller_bci, failure_msg);
   return NULL;
 }
 
@@ -578,3 +610,22 @@
   }
   return iltp;
 }
+
+
+
+#ifndef PRODUCT
+void InlineTree::print_impl(outputStream* st, int indent) const {
+  for (int i = 0; i < indent; i++) st->print(" ");
+  st->print(" @ %d ", caller_bci());
+  method()->print_short_name(st);
+  st->cr();
+
+  for (int i = 0 ; i < _subtrees.length(); i++) {
+    _subtrees.at(i)->print_impl(st, indent + 2);
+  }
+}
+
+void InlineTree::print_value_on(outputStream* st) const {
+  print_impl(st, 2);
+}
+#endif
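
InlineTree::check_can_parse() turns the old boolean can_parse test into a "first failing reason or NULL" helper, so both the inliner and the call generators can report why a callee cannot be parsed. The same shape in a self-contained form (MethodInfo and its fields are hypothetical stand-ins for the ciMethod queries):

  #include <cstddef>

  struct MethodInfo {
    bool is_native;
    bool can_be_compiled;
    bool has_balanced_monitors;
    bool flow_analysis_failed;
  };

  // Return NULL when the method can be parsed, otherwise the first
  // human-readable rejection reason.
  const char* check_can_parse(const MethodInfo& m) {
    if (m.is_native)              return "native method";
    if (!m.can_be_compiled)       return "not compilable (disabled)";
    if (!m.has_balanced_monitors) return "not compilable (unbalanced monitors)";
    if (m.flow_analysis_failed)   return "not compilable (flow analysis failed)";
    return NULL;
  }
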
--- a/src/share/vm/opto/callGenerator.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/callGenerator.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "ci/bcEscapeAnalyzer.hpp"
+#include "ci/ciCallSite.hpp"
 #include "ci/ciCPCache.hpp"
 #include "ci/ciMethodHandle.hpp"
 #include "classfile/javaClasses.hpp"
@@ -60,12 +61,9 @@
   {
     _is_osr        = is_osr;
     _expected_uses = expected_uses;
-    assert(can_parse(method, is_osr), "parse must be possible");
+    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
   }
 
-  // Can we build either an OSR or a regular parser for this method?
-  static bool can_parse(ciMethod* method, int is_osr = false);
-
   virtual bool      is_parse() const           { return true; }
   virtual JVMState* generate(JVMState* jvms);
   int is_osr() { return _is_osr; }
@@ -151,7 +149,6 @@
     call->set_optimized_virtual(true);
     if (method()->is_method_handle_invoke()) {
       call->set_method_handle_invoke(true);
-      kit.C->set_has_method_handle_invokes(true);
     }
   }
   kit.set_arguments_for_java_call(call);
@@ -209,7 +206,6 @@
   call->set_optimized_virtual(true);
   // Take extra care (in the presence of argument motion) not to trash the SP:
   call->set_method_handle_invoke(true);
-  kit.C->set_has_method_handle_invokes(true);
 
   // Pass the target MethodHandle as first argument and shift the
   // other arguments.
@@ -302,20 +298,8 @@
   return kit.transfer_exceptions_into_jvms();
 }
 
-bool ParseGenerator::can_parse(ciMethod* m, int entry_bci) {
-  // Certain methods cannot be parsed at all:
-  if (!m->can_be_compiled())              return false;
-  if (!m->has_balanced_monitors())        return false;
-  if (m->get_flow_analysis()->failing())  return false;
-
-  // (Methods may bail out for other reasons, after the parser is run.
-  // We try to avoid this, but if forced, we must return (Node*)NULL.
-  // The user of the CallGenerator must check for this condition.)
-  return true;
-}
-
 CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
-  if (!ParseGenerator::can_parse(m))  return NULL;
+  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
   return new ParseGenerator(m, expected_uses);
 }
 
@@ -323,7 +307,7 @@
 // for the method execution already in progress, not just the JVMS
 // of the caller.  Thus, this CallGenerator cannot be mixed with others!
 CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
-  if (!ParseGenerator::can_parse(m, true))  return NULL;
+  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
   float past_uses = m->interpreter_invocation_count();
   float expected_uses = past_uses;
   return new ParseGenerator(m, expected_uses, true);
@@ -335,7 +319,7 @@
 }
 
 CallGenerator* CallGenerator::for_dynamic_call(ciMethod* m) {
-  assert(m->is_method_handle_invoke(), "for_dynamic_call mismatch");
+  assert(m->is_method_handle_invoke() || m->is_method_handle_adapter(), "for_dynamic_call mismatch");
   return new DynamicCallGenerator(m);
 }
 
@@ -714,24 +698,63 @@
     // Get an adapter for the MethodHandle.
     ciMethod* target_method = method_handle->get_method_handle_adapter();
     if (target_method != NULL) {
-      CallGenerator* hit_cg = Compile::current()->call_generator(target_method, -1, false, jvms, true, 1);
-      if (hit_cg != NULL && hit_cg->is_inline())
-        return hit_cg;
+      CallGenerator* cg = Compile::current()->call_generator(target_method, -1, false, jvms, true, PROB_ALWAYS);
+      if (cg != NULL && cg->is_inline())
+        return cg;
     }
   } else if (method_handle->Opcode() == Op_Phi && method_handle->req() == 3 &&
              method_handle->in(1)->Opcode() == Op_ConP && method_handle->in(2)->Opcode() == Op_ConP) {
+    float prob = PROB_FAIR;
+    Node* meth_region = method_handle->in(0);
+    if (meth_region->is_Region() &&
+        meth_region->in(1)->is_Proj() && meth_region->in(2)->is_Proj() &&
+        meth_region->in(1)->in(0) == meth_region->in(2)->in(0) &&
+        meth_region->in(1)->in(0)->is_If()) {
+      // It is an If diamond, so grab the probability of the test to drive the inlining below
+      prob = meth_region->in(1)->in(0)->as_If()->_prob;
+      if (meth_region->in(1)->is_IfTrue()) {
+        prob = 1 - prob;
+      }
+    }
+
     // selectAlternative idiom merging two constant MethodHandles.
     // Generate a guard so that each can be inlined.  We might want to
     // do more inputs at later point but this gets the most common
     // case.
-    const TypeOopPtr* oop_ptr = method_handle->in(1)->bottom_type()->is_oopptr();
-    ciObject* const_oop = oop_ptr->const_oop();
-    ciMethodHandle* mh = const_oop->as_method_handle();
+    CallGenerator* cg1 = for_method_handle_inline(method_handle->in(1), jvms, caller, callee, profile.rescale(1.0 - prob));
+    CallGenerator* cg2 = for_method_handle_inline(method_handle->in(2), jvms, caller, callee, profile.rescale(prob));
+    if (cg1 != NULL && cg2 != NULL) {
+      const TypeOopPtr* oop_ptr = method_handle->in(1)->bottom_type()->is_oopptr();
+      ciObject* const_oop = oop_ptr->const_oop();
+      ciMethodHandle* mh = const_oop->as_method_handle();
+      return new PredictedDynamicCallGenerator(mh, cg2, cg1, prob);
+    }
+  }
+  return NULL;
+}
+
+
+CallGenerator* CallGenerator::for_invokedynamic_inline(ciCallSite* call_site, JVMState* jvms,
+                                                       ciMethod* caller, ciMethod* callee, ciCallProfile profile) {
+  ciMethodHandle* method_handle = call_site->get_target();
 
-    CallGenerator* cg1 = for_method_handle_inline(method_handle->in(1), jvms, caller, callee, profile);
-    CallGenerator* cg2 = for_method_handle_inline(method_handle->in(2), jvms, caller, callee, profile);
-    if (cg1 != NULL && cg2 != NULL) {
-      return new PredictedDynamicCallGenerator(mh, cg2, cg1, PROB_FAIR);
+  // Set the callee to have access to the class and signature in the
+  // MethodHandleCompiler.
+  method_handle->set_callee(callee);
+  method_handle->set_caller(caller);
+  method_handle->set_call_profile(profile);
+
+  // Get an adapter for the MethodHandle.
+  ciMethod* target_method = method_handle->get_invokedynamic_adapter();
+  if (target_method != NULL) {
+    Compile *C = Compile::current();
+    CallGenerator* cg = C->call_generator(target_method, -1, false, jvms, true, PROB_ALWAYS);
+    if (cg != NULL && cg->is_inline()) {
+      // Add a dependence for invalidation of the optimization.
+      if (!call_site->is_constant_call_site()) {
+        C->dependencies()->assert_call_site_target_value(call_site, method_handle);
+      }
+      return cg;
     }
   }
   return NULL;
--- a/src/share/vm/opto/callGenerator.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/callGenerator.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -111,7 +111,8 @@
   static CallGenerator* for_dynamic_call(ciMethod* m);   // invokedynamic
   static CallGenerator* for_virtual_call(ciMethod* m, int vtable_index);  // virtual, interface
 
-  static CallGenerator* for_method_handle_inline(Node* method_handle, JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile);
+  static CallGenerator* for_method_handle_inline(Node* method_handle,   JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile);
+  static CallGenerator* for_invokedynamic_inline(ciCallSite* call_site, JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile);
 
   // How to generate a replace a direct call with an inline version
   static CallGenerator* for_late_inline(ciMethod* m, CallGenerator* inline_cg);
--- a/src/share/vm/opto/callnode.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/callnode.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -67,7 +67,6 @@
   const TypeTuple *_domain;
   StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
     init_class_id(Class_Start);
-    init_flags(Flag_is_block_start);
     init_req(0,this);
     init_req(1,root);
   }
@@ -188,6 +187,7 @@
 // This provides a way to map the optimized program back into the interpreter,
 // or to let the GC mark the stack.
 class JVMState : public ResourceObj {
+  friend class VMStructs;
 public:
   typedef enum {
     Reexecute_Undefined = -1, // not defined -- will be translated into false later
@@ -501,6 +501,7 @@
 // Call nodes now subsume the function of debug nodes at callsites, so they
 // contain the functionality of a full scope chain of debug nodes.
 class CallNode : public SafePointNode {
+  friend class VMStructs;
 public:
   const TypeFunc *_tf;        // Function type
   address      _entry_point;  // Address of method being called
@@ -513,7 +514,6 @@
       _cnt(COUNT_UNKNOWN)
   {
     init_class_id(Class_Call);
-    init_flags(Flag_is_Call);
   }
 
   const TypeFunc* tf()        const { return _tf; }
@@ -567,6 +567,7 @@
 // convention.  (The "Java" calling convention is the compiler's calling
 // convention, as opposed to the interpreter's or that of native C.)
 class CallJavaNode : public CallNode {
+  friend class VMStructs;
 protected:
   virtual uint cmp( const Node &n ) const;
   virtual uint size_of() const; // Size is bigger
--- a/src/share/vm/opto/cfgnode.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/cfgnode.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -217,9 +217,7 @@
 // GotoNodes perform direct branches.
 class GotoNode : public Node {
 public:
-  GotoNode( Node *control ) : Node(control) {
-    init_flags(Flag_is_Goto);
-  }
+  GotoNode( Node *control ) : Node(control) {}
   virtual int Opcode() const;
   virtual bool pinned() const { return true; }
   virtual bool  is_CFG() const { return true; }
--- a/src/share/vm/opto/chaitin.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/chaitin.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,6 +50,7 @@
 //------------------------------LRG--------------------------------------------
 // Live-RanGe structure.
 class LRG : public ResourceObj {
+  friend class VMStructs;
 public:
   enum { SPILL_REG=29999 };     // Register number of a spilled LRG
 
@@ -181,6 +182,7 @@
 // Map Node indices to Live RanGe indices.
 // Array lookup in the optimized case.
 class LRG_List : public ResourceObj {
+  friend class VMStructs;
   uint _cnt, _max;
   uint* _lidxs;
   ReallocMark _nesting;         // assertion check for reallocations
@@ -211,6 +213,7 @@
 // abstract!  It needs abstraction so I can fiddle with the implementation to
 // get even more speed.
 class PhaseIFG : public Phase {
+  friend class VMStructs;
   // Current implementation: a triangular adjacency list.
 
   // Array of adjacency-lists, indexed by live-range number
@@ -294,6 +297,7 @@
 //------------------------------Chaitin----------------------------------------
 // Briggs-Chaitin style allocation, mostly.
 class PhaseChaitin : public PhaseRegAlloc {
+  friend class VMStructs;
 
   int _trip_cnt;
   int _alternate;
@@ -482,6 +486,7 @@
   }
 
   int yank_if_dead( Node *old, Block *current_block, Node_List *value, Node_List *regnd );
+  int yank( Node *old, Block *current_block, Node_List *value, Node_List *regnd );
   int elide_copy( Node *n, int k, Block *current_block, Node_List &value, Node_List &regnd, bool can_change_regs );
   int use_prior_register( Node *copy, uint idx, Node *def, Block *current_block, Node_List &value, Node_List &regnd );
   bool may_be_copy_of_callee( Node *def ) const;
--- a/src/share/vm/opto/classes.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/classes.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -161,8 +161,10 @@
 macro(MachProj)
 macro(MaxI)
 macro(MemBarAcquire)
+macro(MemBarAcquireLock)
 macro(MemBarCPUOrder)
 macro(MemBarRelease)
+macro(MemBarReleaseLock)
 macro(MemBarVolatile)
 macro(MergeMem)
 macro(MinI)
@@ -194,6 +196,7 @@
 macro(PopCountI)
 macro(PopCountL)
 macro(PowD)
+macro(PrefetchAllocation)
 macro(PrefetchRead)
 macro(PrefetchWrite)
 macro(Proj)
--- a/src/share/vm/opto/coalesce.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/coalesce.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -139,7 +139,7 @@
   Block *bcon = _cfg._bbs[con->_idx];
   uint cindex = bcon->find_node(con);
   Node *con_next = bcon->_nodes[cindex+1];
-  if( con_next->in(0) != con || con_next->Opcode() != Op_MachProj )
+  if( con_next->in(0) != con || !con_next->is_MachProj() )
     return false;               // No MachProj's follow
 
   // Copy kills after the cloned constant
@@ -312,7 +312,7 @@
     // parallel renaming effort.
     if( n->_idx < _unique ) break;
     uint idx = n->is_Copy();
-    assert( idx || n->is_Con() || n->Opcode() == Op_MachProj, "Only copies during parallel renaming" );
+    assert( idx || n->is_Con() || n->is_MachProj(), "Only copies during parallel renaming" );
     if( idx && _phc.Find(n->in(idx)) == dst_name ) break;
     i--;
   }
@@ -329,7 +329,7 @@
     // Check for end of virtual copies; this is also the end of the
     // parallel renaming effort.
     if( n->_idx < _unique ) break;
-    assert( n->is_Copy() || n->is_Con() || n->Opcode() == Op_MachProj, "Only copies during parallel renaming" );
+    assert( n->is_Copy() || n->is_Con() || n->is_MachProj(), "Only copies during parallel renaming" );
     if( _phc.Find(n) == src_name ) {
       kill_src_idx = i;
       break;
--- a/src/share/vm/opto/compile.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/compile.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -517,7 +517,20 @@
   buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize);
 
   // Do the emission.
+
+  Label fakeL; // Fake label for branch instructions.
+  Label*   saveL = NULL;
+  uint save_bnum = 0;
+  bool is_branch = n->is_MachBranch();
+  if (is_branch) {
+    MacroAssembler masm(&buf);
+    masm.bind(fakeL);
+    n->as_MachBranch()->save_label(&saveL, &save_bnum);
+    n->as_MachBranch()->label_set(&fakeL, 0);
+  }
   n->emit(buf, this->regalloc());
+  if (is_branch) // Restore label.
+    n->as_MachBranch()->label_set(saveL, save_bnum);
 
   // End scratch_emit_size section.
   set_in_scratch_emit_size(false);
@@ -804,7 +817,6 @@
                            &_handler_table, &_inc_table,
                            compiler,
                            env()->comp_level(),
-                           true, /*has_debug_info*/
                            has_unsafe_access()
                            );
   }
@@ -1206,11 +1218,7 @@
     // Make sure the Bottom and NotNull variants alias the same.
     // Also, make sure exact and non-exact variants alias the same.
     if( ptr == TypePtr::NotNull || ta->klass_is_exact() ) {
-      if (ta->const_oop()) {
-        tj = ta = TypeAryPtr::make(TypePtr::Constant,ta->const_oop(),ta->ary(),ta->klass(),false,offset);
-      } else {
-        tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset);
-      }
+      tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset);
     }
   }
 
--- a/src/share/vm/opto/compile.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/compile.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -80,6 +80,8 @@
 // This class defines a top-level Compiler invocation.
 
 class Compile : public Phase {
+  friend class VMStructs;
+
  public:
   // Fixed alias indexes.  (See also MergeMemNode.)
   enum {
@@ -785,11 +787,14 @@
   // Process an OopMap Element while emitting nodes
   void Process_OopMap_Node(MachNode *mach, int code_offset);
 
+  // Initialize code buffer
+  CodeBuffer* init_buffer(uint* blk_starts);
+
   // Write out basic block data to code buffer
-  void Fill_buffer();
+  void fill_buffer(CodeBuffer* cb, uint* blk_starts);
 
   // Determine which variable sized branches can be shortened
-  void Shorten_branches(Label *labels, int& code_size, int& reloc_size, int& stub_size);
+  void shorten_branches(uint* blk_starts, int& code_size, int& reloc_size, int& stub_size);
 
   // Compute the size of first NumberOfLoopInstrToAlign instructions
   // at the head of a loop.
--- a/src/share/vm/opto/connode.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/connode.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -496,14 +496,6 @@
   virtual bool depends_only_on_test() const { return false; }
 };
 
-//------------------------------MemMoveNode------------------------------------
-// Memory to memory move.  Inserted very late, after allocation.
-class MemMoveNode : public Node {
-public:
-  MemMoveNode( Node *dst, Node *src ) : Node(0,dst,src) {}
-  virtual int Opcode() const;
-};
-
 //------------------------------ThreadLocalNode--------------------------------
 // Ideal Node which returns the base of ThreadLocalStorage.
 class ThreadLocalNode : public Node {
--- a/src/share/vm/opto/doCall.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/doCall.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -114,7 +114,7 @@
     if (cg != NULL)  return cg;
   }
 
-  // Do MethodHandle calls.
+  // Do method handle calls.
   // NOTE: This must happen before normal inlining logic below since
   // MethodHandle.invoke* are native methods which obviously don't
   // have bytecodes and so normal inlining fails.
@@ -127,33 +127,19 @@
       if (cg != NULL) {
         return cg;
       }
-
       return CallGenerator::for_direct_call(call_method);
     }
     else {
-      // Get the MethodHandle from the CallSite.
+      // Get the CallSite object.
       ciMethod* caller_method = jvms->method();
       ciBytecodeStream str(caller_method);
       str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
-      ciCallSite*     call_site     = str.get_call_site();
-      ciMethodHandle* method_handle = call_site->get_target();
-
-      // Set the callee to have access to the class and signature in
-      // the MethodHandleCompiler.
-      method_handle->set_callee(call_method);
-      method_handle->set_caller(caller);
-      method_handle->set_call_profile(profile);
+      ciCallSite* call_site = str.get_call_site();
 
-      // Get an adapter for the MethodHandle.
-      ciMethod* target_method = method_handle->get_invokedynamic_adapter();
-      if (target_method != NULL) {
-        CallGenerator* hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor);
-        if (hit_cg != NULL && hit_cg->is_inline()) {
-          CallGenerator* miss_cg = CallGenerator::for_dynamic_call(call_method);
-          return CallGenerator::for_predicted_dynamic_call(method_handle, miss_cg, hit_cg, prof_factor);
-        }
+      CallGenerator* cg = CallGenerator::for_invokedynamic_inline(call_site, jvms, caller, call_method, profile);
+      if (cg != NULL) {
+        return cg;
       }
-
       // If something failed, generate a normal dynamic call.
       return CallGenerator::for_dynamic_call(call_method);
     }
--- a/src/share/vm/opto/gcm.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/gcm.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1137,7 +1137,7 @@
 
     // No uses, just terminate
     if (self->outcnt() == 0) {
-      assert(self->Opcode() == Op_MachProj, "sanity");
+      assert(self->is_MachProj(), "sanity");
       continue;                   // Must be a dead machine projection
     }
 
--- a/src/share/vm/opto/graphKit.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/graphKit.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -2856,7 +2856,7 @@
   // lock has no side-effects, sets few values
   set_predefined_output_for_runtime_call(lock, mem, TypeRawPtr::BOTTOM);
 
-  insert_mem_bar(Op_MemBarAcquire);
+  insert_mem_bar(Op_MemBarAcquireLock);
 
   // Add this to the worklist so that the lock can be eliminated
   record_for_igvn(lock);
@@ -2889,7 +2889,7 @@
   }
 
   // Memory barrier to avoid floating things down past the locked region
-  insert_mem_bar(Op_MemBarRelease);
+  insert_mem_bar(Op_MemBarReleaseLock);
 
   const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
   UnlockNode *unlock = new (C, tf->domain()->cnt()) UnlockNode(C, tf);
--- a/src/share/vm/opto/idealGraphPrinter.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/idealGraphPrinter.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -375,9 +375,9 @@
   return (intptr_t)(n);
 }
 
-void IdealGraphPrinter::visit_node(Node *n, void *param) {
+void IdealGraphPrinter::visit_node(Node *n, bool edges, VectorSet* temp_set) {
 
-  if(param) {
+  if (edges) {
 
     // Output edge
     intptr_t dest_id = get_node_id(n);
@@ -426,9 +426,6 @@
     if (flags & Node::Flag_is_Copy) {
       print_prop("is_copy", "true");
     }
-    if (flags & Node::Flag_is_Call) {
-      print_prop("is_call", "true");
-    }
     if (flags & Node::Flag_rematerialize) {
       print_prop("rematerialize", "true");
     }
@@ -444,27 +441,12 @@
     if (flags & Node::Flag_is_cisc_alternate) {
       print_prop("is_cisc_alternate", "true");
     }
-    if (flags & Node::Flag_is_Branch) {
-      print_prop("is_branch", "true");
-    }
-    if (flags & Node::Flag_is_block_start) {
-      print_prop("is_block_start", "true");
-    }
-    if (flags & Node::Flag_is_Goto) {
-      print_prop("is_goto", "true");
-    }
     if (flags & Node::Flag_is_dead_loop_safe) {
       print_prop("is_dead_loop_safe", "true");
     }
     if (flags & Node::Flag_may_be_short_branch) {
       print_prop("may_be_short_branch", "true");
     }
-    if (flags & Node::Flag_is_safepoint_node) {
-      print_prop("is_safepoint_node", "true");
-    }
-    if (flags & Node::Flag_is_pc_relative) {
-      print_prop("is_pc_relative", "true");
-    }
 
     if (C->matcher() != NULL) {
       if (C->matcher()->is_shared(node)) {
@@ -617,16 +599,11 @@
 
 #ifdef ASSERT
     if (node->debug_orig() != NULL) {
+      temp_set->Clear();
       stringStream dorigStream;
       Node* dorig = node->debug_orig();
-      if (dorig) {
+      while (dorig && temp_set->test_set(dorig->_idx)) {
         dorigStream.print("%d ", dorig->_idx);
-        Node* first = dorig;
-        dorig = first->debug_orig();
-        while (dorig && dorig != first) {
-          dorigStream.print("%d ", dorig->_idx);
-          dorig = dorig->debug_orig();
-        }
       }
       print_prop("debug_orig", dorigStream.as_string());
     }
@@ -647,7 +624,7 @@
   }
 }
 
-void IdealGraphPrinter::walk_nodes(Node *start, void *param) {
+void IdealGraphPrinter::walk_nodes(Node *start, bool edges, VectorSet* temp_set) {
 
 
   VectorSet visited(Thread::current()->resource_area());
@@ -668,7 +645,7 @@
   while(nodeStack.length() > 0) {
 
     Node *n = nodeStack.pop();
-    visit_node(n, param);
+    visit_node(n, edges, temp_set);
 
     if (_traverse_outs) {
       for (DUIterator i = n->outs(); n->has_out(i); i++) {
@@ -707,12 +684,14 @@
   print_attr(GRAPH_NAME_PROPERTY, (const char *)name);
   end_head();
 
+  VectorSet temp_set(Thread::current()->resource_area());
+
   head(NODES_ELEMENT);
-  walk_nodes(node, NULL);
+  walk_nodes(node, false, &temp_set);
   tail(NODES_ELEMENT);
 
   head(EDGES_ELEMENT);
-  walk_nodes(node, (void *)1);
+  walk_nodes(node, true, &temp_set);
   tail(EDGES_ELEMENT);
   if (C->cfg() != NULL) {
     head(CONTROL_FLOW_ELEMENT);
--- a/src/share/vm/opto/idealGraphPrinter.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/idealGraphPrinter.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -104,8 +104,8 @@
   void print_indent();
   void print_method(ciMethod *method, int bci, InlineTree *tree);
   void print_inline_tree(InlineTree *tree);
-  void visit_node(Node *n, void *param);
-  void walk_nodes(Node *start, void *param);
+  void visit_node(Node *n, bool edges, VectorSet* temp_set);
+  void walk_nodes(Node *start, bool edges, VectorSet* temp_set);
   void begin_elem(const char *s);
   void end_elem();
   void begin_head(const char *s);
--- a/src/share/vm/opto/lcm.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/lcm.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -45,6 +45,9 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/ad_arm.hpp"
 #endif
+#ifdef TARGET_ARCH_MODEL_ppc
+# include "adfiles/ad_ppc.hpp"
+#endif
 
 // Optimization - Graph Style
 
@@ -322,7 +325,7 @@
       // that also need to be hoisted.
       for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
         Node* n = val->fast_out(j);
-        if( n->Opcode() == Op_MachProj ) {
+        if( n->is_MachProj() ) {
           cfg->_bbs[n->_idx]->find_remove(n);
           this->add_inst(n);
           cfg->_bbs.map(n->_idx,this);
@@ -344,7 +347,7 @@
   // Should be DU safe because no edge updates.
   for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) {
     Node* n = best->fast_out(j);
-    if( n->Opcode() == Op_MachProj ) {
+    if( n->is_MachProj() ) {
       cfg->_bbs[n->_idx]->find_remove(n);
       add_inst(n);
       cfg->_bbs.map(n->_idx,this);
@@ -536,7 +539,7 @@
     Node* m = this_call->fast_out(i);
     if( bbs[m->_idx] == this && // Local-block user
         m != this_call &&       // Not self-start node
-        m->is_Call() )
+        m->is_MachCall() )
       call = m;
       break;
   }
@@ -554,7 +557,7 @@
   // Collect all the defined registers.
   for (DUIterator_Fast imax, i = mcall->fast_outs(imax); i < imax; i++) {
     Node* n = mcall->fast_out(i);
-    assert( n->Opcode()==Op_MachProj, "" );
+    assert( n->is_MachProj(), "" );
     --ready_cnt[n->_idx];
     assert( !ready_cnt[n->_idx], "" );
     // Schedule next to call
@@ -972,8 +975,8 @@
   if( !_nodes[end]->is_Catch() ) return;
   // Start of region to clone
   uint beg = end;
-  while( _nodes[beg-1]->Opcode() != Op_MachProj ||
-        !_nodes[beg-1]->in(0)->is_Call() ) {
+  while (!_nodes[beg-1]->is_MachProj() ||
+        !_nodes[beg-1]->in(0)->is_MachCall() ) {
     beg--;
     assert(beg > 0,"Catch cleanup walking beyond block boundary");
   }
--- a/src/share/vm/opto/loopPredicate.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/loopPredicate.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -718,7 +718,7 @@
   }
 
   CountedLoopNode *cl = NULL;
-  if (head->is_CountedLoop()) {
+  if (head->is_valid_counted_loop()) {
     cl = head->as_CountedLoop();
     // do nothing for iteration-splitted loops
     if (!cl->is_normal_loop()) return false;
--- a/src/share/vm/opto/loopTransform.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/loopTransform.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -709,10 +709,13 @@
 
   // Adjust body_size to determine if we unroll or not
   uint body_size = _body.size();
+  // Key test for unrolling the loop in the CRC32 Java code
+  int xors_in_loop = 0;
   // Also count ModL, DivL and MulL which expand mightly
   for (uint k = 0; k < _body.size(); k++) {
     Node* n = _body.at(k);
     switch (n->Opcode()) {
+      case Op_XorI: xors_in_loop++; break; // CRC32 java code
       case Op_ModL: body_size += 30; break;
       case Op_DivL: body_size += 30; break;
       case Op_MulL: body_size += 10; break;
@@ -729,7 +732,8 @@
 
   // Check for being too big
   if (body_size > (uint)LoopUnrollLimit) {
-     // Normal case: loop too big
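+    // Allow XOR-heavy loops (e.g. the CRC32 inner loop counted above) to
+    // exceed the normal limit, up to 4x LoopUnrollLimit.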
+    if (xors_in_loop >= 4 && body_size < (uint)LoopUnrollLimit*4) return true;
+    // Normal case: loop too big
     return false;
   }
 
@@ -2099,7 +2103,7 @@
   if (!_head->is_CountedLoop())
     return false;     // Dead loop
   CountedLoopNode *cl = _head->as_CountedLoop();
-  if (!cl->loopexit())
+  if (!cl->is_valid_counted_loop())
     return false; // Malformed loop
   if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue))))
     return false;             // Infinite loop
@@ -2255,7 +2259,7 @@
   }
   CountedLoopNode *cl = _head->as_CountedLoop();
 
-  if (!cl->loopexit()) return true; // Ignore various kinds of broken loops
+  if (!cl->is_valid_counted_loop()) return true; // Ignore various kinds of broken loops
 
   // Do nothing special to pre- and post- loops
   if (cl->is_pre_loop() || cl->is_post_loop()) return true;
@@ -2636,7 +2640,7 @@
 
   // Must have constant stride
   CountedLoopNode* head = lpt->_head->as_CountedLoop();
-  if (!head->stride_is_con() || !head->is_normal_loop()) {
+  if (!head->is_valid_counted_loop() || !head->is_normal_loop()) {
     return false;
   }
 
--- a/src/share/vm/opto/loopnode.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/loopnode.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -582,20 +582,25 @@
 
   // Build a canonical trip test.
   // Clone code, as old values may be in use.
-  Node* nphi = PhiNode::make(x, init_trip, TypeInt::INT);
-  nphi = _igvn.register_new_node_with_optimizer(nphi);
-  set_ctrl(nphi, get_ctrl(phi));
-
   incr = incr->clone();
-  incr->set_req(1,nphi);
+  incr->set_req(1,phi);
   incr->set_req(2,stride);
   incr = _igvn.register_new_node_with_optimizer(incr);
   set_early_ctrl( incr );
-
-  nphi->set_req(LoopNode::LoopBackControl, incr);
-  _igvn.replace_node(phi, nphi);
-  phi = nphi->as_Phi();
-
+  _igvn.hash_delete(phi);
+  phi->set_req_X( LoopNode::LoopBackControl, incr, &_igvn );
+
+  // If phi type is more restrictive than Int, raise to
+  // Int to prevent (almost) infinite recursion in igvn
+  // which can only handle integer types for constants or minint..maxint.
+  if (!TypeInt::INT->higher_equal(phi->bottom_type())) {
+    Node* nphi = PhiNode::make(phi->in(0), phi->in(LoopNode::EntryControl), TypeInt::INT);
+    nphi->set_req(LoopNode::LoopBackControl, phi->in(LoopNode::LoopBackControl));
+    nphi = _igvn.register_new_node_with_optimizer(nphi);
+    set_ctrl(nphi, get_ctrl(phi));
+    _igvn.replace_node(phi, nphi);
+    phi = nphi->as_Phi();
+  }
   cmp = cmp->clone();
   cmp->set_req(1,incr);
   cmp->set_req(2,limit);
@@ -689,6 +694,7 @@
 Node* PhaseIdealLoop::exact_limit( IdealLoopTree *loop ) {
   assert(loop->_head->is_CountedLoop(), "");
   CountedLoopNode *cl = loop->_head->as_CountedLoop();
+  assert(cl->is_valid_counted_loop(), "");
 
   if (!LoopLimitCheck || ABS(cl->stride_con()) == 1 ||
       cl->limit()->Opcode() == Op_LoopLimit) {
@@ -709,7 +715,6 @@
     long limit_con = cl->limit()->get_int();
     julong trip_cnt = cl->trip_count();
     long final_con = init_con + trip_cnt*stride_con;
-    final_con -= stride_con;
     int final_int = (int)final_con;
     // The final value should be in integer range since the loop
     // is counted and the limit was checked for overflow.
@@ -1608,18 +1613,15 @@
 void PhaseIdealLoop::replace_parallel_iv(IdealLoopTree *loop) {
   assert(loop->_head->is_CountedLoop(), "");
   CountedLoopNode *cl = loop->_head->as_CountedLoop();
+  if (!cl->is_valid_counted_loop())
+    return;         // skip malformed counted loop
   Node *incr = cl->incr();
   if (incr == NULL)
     return;         // Dead loop?
   Node *init = cl->init_trip();
   Node *phi  = cl->phi();
-  // protect against stride not being a constant
-  if (!cl->stride_is_con())
-    return;
   int stride_con = cl->stride_con();
 
-  PhaseGVN *gvn = &_igvn;
-
   // Visit all children, looking for Phis
   for (DUIterator i = cl->outs(); cl->has_out(i); i++) {
     Node *out = cl->out(i);
@@ -1655,25 +1657,31 @@
     int ratio_con = stride_con2/stride_con;
 
     if ((ratio_con * stride_con) == stride_con2) { // Check for exact
+#ifndef PRODUCT
+      if (TraceLoopOpts) {
+        tty->print("Parallel IV: %d ", phi2->_idx);
+        loop->dump_head();
+      }
+#endif
       // Convert to using the trip counter.  The parallel induction
       // variable differs from the trip counter by a loop-invariant
       // amount, the difference between their respective initial values.
       // It is scaled by the 'ratio_con'.
-      // Perform local Ideal transformation since in most cases ratio == 1.
       Node* ratio = _igvn.intcon(ratio_con);
       set_ctrl(ratio, C->root());
-      Node* hook = new (C, 3) Node(3);
-      Node* ratio_init = gvn->transform(new (C, 3) MulINode(init, ratio));
-      hook->init_req(0, ratio_init);
-      Node* diff = gvn->transform(new (C, 3) SubINode(init2, ratio_init));
-      hook->init_req(1, diff);
-      Node* ratio_idx = gvn->transform(new (C, 3) MulINode(phi, ratio));
-      hook->init_req(2, ratio_idx);
-      Node* add  = gvn->transform(new (C, 3) AddINode(ratio_idx, diff));
-      set_subtree_ctrl(add);
+      Node* ratio_init = new (C, 3) MulINode(init, ratio);
+      _igvn.register_new_node_with_optimizer(ratio_init, init);
+      set_early_ctrl(ratio_init);
+      Node* diff = new (C, 3) SubINode(init2, ratio_init);
+      _igvn.register_new_node_with_optimizer(diff, init2);
+      set_early_ctrl(diff);
+      Node* ratio_idx = new (C, 3) MulINode(phi, ratio);
+      _igvn.register_new_node_with_optimizer(ratio_idx, phi);
+      set_ctrl(ratio_idx, cl);
+      Node* add = new (C, 3) AddINode(ratio_idx, diff);
+      _igvn.register_new_node_with_optimizer(add);
+      set_ctrl(add, cl);
       _igvn.replace_node( phi2, add );
-      // Free up intermediate goo
-      _igvn.remove_dead_node(hook);
       // Sometimes an induction variable is unused
       if (add->outcnt() == 0) {
         _igvn.remove_dead_node(add);
--- a/src/share/vm/opto/machnode.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/machnode.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -389,12 +389,6 @@
 }
 
 
-//------------------------------negate-----------------------------------------
-// Negate conditional branches.  Error for non-branch Nodes
-void MachNode::negate() {
-  ShouldNotCallThis();
-}
-
 //------------------------------peephole---------------------------------------
 // Apply peephole rule(s) to this instruction
 MachNode *MachNode::peephole( Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted, Compile* C ) {
@@ -407,12 +401,6 @@
   ShouldNotCallThis();
 }
 
-//------------------------------label_set--------------------------------------
-// Set the Label for a LabelOper, if an operand for this instruction
-void MachNode::label_set( Label& label, uint block_num ) {
-  ShouldNotCallThis();
-}
-
 //------------------------------method_set-------------------------------------
 // Set the absolute address of a method
 void MachNode::method_set( intptr_t addr ) {
@@ -514,6 +502,12 @@
 void MachNullCheckNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
   // only emits entries in the null-pointer exception handler table
 }
+void MachNullCheckNode::label_set(Label* label, uint block_num) {
+  // Nothing to emit
+}
+void MachNullCheckNode::save_label( Label** label, uint* block_num ) {
+  // Nothing to emit
+}
 
 const RegMask &MachNullCheckNode::in_RegMask( uint idx ) const {
   if( idx == 0 ) return RegMask::Empty;
--- a/src/share/vm/opto/machnode.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/machnode.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -185,9 +185,11 @@
   virtual void use_cisc_RegMask();
 
   // Support for short branches
-  virtual MachNode *short_branch_version(Compile* C) { return NULL; }
   bool may_be_short_branch() const { return (flags() & Flag_may_be_short_branch) != 0; }
 
+  // Avoid scheduling some instructions back to back on some CPUs.
+  bool avoid_back_to_back() const { return (flags() & Flag_avoid_back_to_back) != 0; }
+
   // First index in _in[] corresponding to operand, or -1 if there is none
   int  operand_index(uint operand) const;
 
@@ -269,21 +271,12 @@
   // Call "get_base_and_disp" to decide which category of memory is used here.
   virtual const class TypePtr *adr_type() const;
 
-  // Negate conditional branches.  Error for non-branch Nodes
-  virtual void negate();
-
   // Apply peephole rule(s) to this instruction
   virtual MachNode *peephole( Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted, Compile* C );
 
-  // Check for PC-Relative addressing
-  bool is_pc_relative() const { return (flags() & Flag_is_pc_relative) != 0; }
-
   // Top-level ideal Opcode matched
   virtual int ideal_Opcode()     const { return Op_Node; }
 
-  // Set the branch inside jump MachNodes.  Error for non-branch Nodes.
-  virtual void label_set( Label& label, uint block_num );
-
   // Adds the label for the case
   virtual void add_case_label( int switch_val, Label* blockLabel);
 
@@ -514,24 +507,41 @@
 #endif
 };
 
+//------------------------------MachBranchNode--------------------------------
+// Abstract machine branch Node
+class MachBranchNode : public MachIdealNode {
+public:
+  MachBranchNode() : MachIdealNode() {
+    init_class_id(Class_MachBranch);
+  }
+  virtual void label_set(Label* label, uint block_num) = 0;
+  virtual void save_label(Label** label, uint* block_num) = 0;
+
+  // Support for short branches
+  virtual MachNode *short_branch_version(Compile* C) { return NULL; }
+
+  virtual bool pinned() const { return true; };
+};
+
 //------------------------------MachNullChkNode--------------------------------
 // Machine-dependent null-pointer-check Node.  Points a real MachNode that is
 // also some kind of memory op.  Turns the indicated MachNode into a
 // conditional branch with good latency on the ptr-not-null path and awful
 // latency on the pointer-is-null path.
 
-class MachNullCheckNode : public MachIdealNode {
+class MachNullCheckNode : public MachBranchNode {
 public:
   const uint _vidx;             // Index of memop being tested
-  MachNullCheckNode( Node *ctrl, Node *memop, uint vidx ) : MachIdealNode(), _vidx(vidx) {
+  MachNullCheckNode( Node *ctrl, Node *memop, uint vidx ) : MachBranchNode(), _vidx(vidx) {
     init_class_id(Class_MachNullCheck);
-    init_flags(Flag_is_Branch | Flag_is_pc_relative);
     add_req(ctrl);
     add_req(memop);
   }
+  virtual uint size_of() const { return sizeof(*this); }
 
   virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
-  virtual bool pinned() const { return true; };
+  virtual void label_set(Label* label, uint block_num);
+  virtual void save_label(Label** label, uint* block_num);
   virtual void negate() { }
   virtual const class Type *bottom_type() const { return TypeTuple::IFBOTH; }
   virtual uint ideal_reg() const { return NotAMachineReg; }
@@ -553,7 +563,9 @@
 // occasional callbacks to the machine model for important info.
 class MachProjNode : public ProjNode {
 public:
-  MachProjNode( Node *multi, uint con, const RegMask &out, uint ideal_reg ) : ProjNode(multi,con), _rout(out), _ideal_reg(ideal_reg) {}
+  MachProjNode( Node *multi, uint con, const RegMask &out, uint ideal_reg ) : ProjNode(multi,con), _rout(out), _ideal_reg(ideal_reg) {
+    init_class_id(Class_MachProj);
+  }
   RegMask _rout;
   const uint  _ideal_reg;
   enum projType {
@@ -575,19 +587,30 @@
 
 //------------------------------MachIfNode-------------------------------------
 // Machine-specific versions of IfNodes
-class MachIfNode : public MachNode {
+class MachIfNode : public MachBranchNode {
   virtual uint size_of() const { return sizeof(*this); } // Size is bigger
 public:
   float _prob;                  // Probability branch goes either way
   float _fcnt;                  // Frequency counter
-  MachIfNode() : MachNode() {
+  MachIfNode() : MachBranchNode() {
     init_class_id(Class_MachIf);
   }
+  // Negate conditional branches.
+  virtual void negate() = 0;
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const;
 #endif
 };
 
+//------------------------------MachGotoNode-----------------------------------
+// Machine-specific versions of GotoNodes
+class MachGotoNode : public MachBranchNode {
+public:
+  MachGotoNode() : MachBranchNode() {
+    init_class_id(Class_MachGoto);
+  }
+};
+
 //------------------------------MachFastLockNode-------------------------------------
 // Machine-specific versions of FastLockNodes
 class MachFastLockNode : public MachNode {
@@ -630,14 +653,12 @@
 
   MachSafePointNode() : MachReturnNode(), _oop_map(NULL), _jvms(NULL), _jvmadj(0) {
     init_class_id(Class_MachSafePoint);
-    init_flags(Flag_is_safepoint_node);
   }
 
   virtual JVMState* jvms() const { return _jvms; }
   void set_jvms(JVMState* s) {
     _jvms = s;
   }
-  bool is_safepoint_node() const { return (flags() & Flag_is_safepoint_node) != 0; }
   virtual const Type    *bottom_type() const;
 
   virtual const RegMask &in_RegMask(uint) const;
@@ -701,7 +722,6 @@
 
   MachCallNode() : MachSafePointNode() {
     init_class_id(Class_MachCall);
-    init_flags(Flag_is_Call);
   }
 
   virtual const Type *bottom_type() const;
@@ -853,7 +873,7 @@
 
   virtual MachOper *clone(Compile* C) const;
 
-  virtual Label *label() const { return _label; }
+  virtual Label *label() const { assert(_label != NULL, "need Label"); return _label; }
 
   virtual uint           opcode() const;
 
--- a/src/share/vm/opto/macro.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/macro.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1590,7 +1590,7 @@
         prefetch_adr = new (C, 4) AddPNode( old_pf_wm, new_pf_wmt,
                                             _igvn.MakeConX(distance) );
         transform_later(prefetch_adr);
-        prefetch = new (C, 3) PrefetchWriteNode( i_o, prefetch_adr );
+        prefetch = new (C, 3) PrefetchAllocationNode( i_o, prefetch_adr );
         transform_later(prefetch);
         distance += step_size;
         i_o = prefetch;
@@ -1611,13 +1611,14 @@
       contended_phi_rawmem = pf_phi_rawmem;
       i_o = pf_phi_abio;
    } else if( UseTLAB && AllocatePrefetchStyle == 3 ) {
-      // Insert a prefetch for each allocation only on the fast-path
+      // Insert a prefetch for each allocation.
+      // This code is used on SPARC with BIS.
       Node *pf_region = new (C, 3) RegionNode(3);
       Node *pf_phi_rawmem = new (C, 3) PhiNode( pf_region, Type::MEMORY,
                                                 TypeRawPtr::BOTTOM );
 
-      // Generate several prefetch instructions only for arrays.
-      uint lines = (length != NULL) ? AllocatePrefetchLines : 1;
+      // Generate several prefetch instructions.
+      uint lines = (length != NULL) ? AllocatePrefetchLines : AllocateInstancePrefetchLines;
       uint step_size = AllocatePrefetchStepSize;
       uint distance = AllocatePrefetchDistance;
 
@@ -1634,7 +1635,7 @@
       transform_later(cache_adr);
 
       // Prefetch
-      Node *prefetch = new (C, 3) PrefetchWriteNode( contended_phi_rawmem, cache_adr );
+      Node *prefetch = new (C, 3) PrefetchAllocationNode( contended_phi_rawmem, cache_adr );
       prefetch->set_req(0, needgc_false);
       transform_later(prefetch);
       contended_phi_rawmem = prefetch;
@@ -1644,7 +1645,7 @@
         prefetch_adr = new (C, 4) AddPNode( cache_adr, cache_adr,
                                             _igvn.MakeConX(distance) );
         transform_later(prefetch_adr);
-        prefetch = new (C, 3) PrefetchWriteNode( contended_phi_rawmem, prefetch_adr );
+        prefetch = new (C, 3) PrefetchAllocationNode( contended_phi_rawmem, prefetch_adr );
         transform_later(prefetch);
         distance += step_size;
         contended_phi_rawmem = prefetch;
@@ -1653,15 +1654,15 @@
       // Insert a prefetch for each allocation only on the fast-path
       Node *prefetch_adr;
       Node *prefetch;
-      // Generate several prefetch instructions only for arrays.
-      uint lines = (length != NULL) ? AllocatePrefetchLines : 1;
+      // Generate several prefetch instructions.
+      uint lines = (length != NULL) ? AllocatePrefetchLines : AllocateInstancePrefetchLines;
       uint step_size = AllocatePrefetchStepSize;
       uint distance = AllocatePrefetchDistance;
       for ( uint i = 0; i < lines; i++ ) {
         prefetch_adr = new (C, 4) AddPNode( old_eden_top, new_eden_top,
                                             _igvn.MakeConX(distance) );
         transform_later(prefetch_adr);
-        prefetch = new (C, 3) PrefetchWriteNode( i_o, prefetch_adr );
+        prefetch = new (C, 3) PrefetchAllocationNode( i_o, prefetch_adr );
         // Do not let it float too high, since if eden_top == eden_end,
         // both might be null.
         if( i == 0 ) { // Set control for first prefetch, next follows it
@@ -1816,9 +1817,9 @@
   // The input to a Lock is merged memory, so extract its RawMem input
   // (unless the MergeMem has been optimized away.)
   if (alock->is_Lock()) {
-    // Seach for MemBarAcquire node and delete it also.
+    // Search for the MemBarAcquireLock node and delete it as well.
     MemBarNode* membar = fallthroughproj->unique_ctrl_out()->as_MemBar();
-    assert(membar != NULL && membar->Opcode() == Op_MemBarAcquire, "");
+    assert(membar != NULL && membar->Opcode() == Op_MemBarAcquireLock, "");
     Node* ctrlproj = membar->proj_out(TypeFunc::Control);
     Node* memproj = membar->proj_out(TypeFunc::Memory);
     _igvn.replace_node(ctrlproj, fallthroughproj);
@@ -1833,11 +1834,11 @@
     }
   }
 
-  // Seach for MemBarRelease node and delete it also.
+  // Search for the MemBarReleaseLock node and delete it as well.
   if (alock->is_Unlock() && ctrl != NULL && ctrl->is_Proj() &&
       ctrl->in(0)->is_MemBar()) {
     MemBarNode* membar = ctrl->in(0)->as_MemBar();
-    assert(membar->Opcode() == Op_MemBarRelease &&
+    assert(membar->Opcode() == Op_MemBarReleaseLock &&
            mem->is_Proj() && membar == mem->in(0), "");
     _igvn.replace_node(fallthroughproj, ctrl);
     _igvn.replace_node(memproj_fallthrough, mem);
--- a/src/share/vm/opto/matcher.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/matcher.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -52,6 +52,9 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/ad_arm.hpp"
 #endif
+#ifdef TARGET_ARCH_MODEL_ppc
+# include "adfiles/ad_ppc.hpp"
+#endif
 
 OptoReg::Name OptoReg::c_frame_pointer;
 
@@ -498,6 +501,12 @@
      idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegD]);
 #else
      idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegF]);
+#ifdef ARM
+     // ARM has support for moving 64bit values between a pair of
+     // integer registers and a double register
+     idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
+     idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
+#endif
 #endif
    }
 
@@ -823,6 +832,7 @@
     switch (n->Opcode()) {
     case Op_PrefetchRead:
     case Op_PrefetchWrite:
+    case Op_PrefetchAllocation:
       nidx = Compile::AliasIdxRaw;
       nat = TypeRawPtr::BOTTOM;
       break;
@@ -1102,6 +1112,9 @@
       mcall_java->_optimized_virtual = call_java->is_optimized_virtual();
       is_method_handle_invoke = call_java->is_method_handle_invoke();
       mcall_java->_method_handle_invoke = is_method_handle_invoke;
+      if (is_method_handle_invoke) {
+        C->set_has_method_handle_invokes(true);
+      }
       if( mcall_java->is_MachCallStaticJava() )
         mcall_java->as_MachCallStaticJava()->_name =
          call_java->as_CallStaticJava()->_name;
@@ -2227,57 +2240,6 @@
   }
 }
 
-
-// Used by the DFA in dfa_sparc.cpp.  Check for a prior FastLock
-// acting as an Acquire and thus we don't need an Acquire here.  We
-// retain the Node to act as a compiler ordering barrier.
-bool Matcher::prior_fast_lock( const Node *acq ) {
-  Node *r = acq->in(0);
-  if( !r->is_Region() || r->req() <= 1 ) return false;
-  Node *proj = r->in(1);
-  if( !proj->is_Proj() ) return false;
-  Node *call = proj->in(0);
-  if( !call->is_Call() || call->as_Call()->entry_point() != OptoRuntime::complete_monitor_locking_Java() )
-    return false;
-
-  return true;
-}
-
-// Used by the DFA in dfa_sparc.cpp.  Check for a following FastUnLock
-// acting as a Release and thus we don't need a Release here.  We
-// retain the Node to act as a compiler ordering barrier.
-bool Matcher::post_fast_unlock( const Node *rel ) {
-  Compile *C = Compile::current();
-  assert( rel->Opcode() == Op_MemBarRelease, "" );
-  const MemBarReleaseNode *mem = (const MemBarReleaseNode*)rel;
-  DUIterator_Fast imax, i = mem->fast_outs(imax);
-  Node *ctrl = NULL;
-  while( true ) {
-    ctrl = mem->fast_out(i);            // Throw out-of-bounds if proj not found
-    assert( ctrl->is_Proj(), "only projections here" );
-    ProjNode *proj = (ProjNode*)ctrl;
-    if( proj->_con == TypeFunc::Control &&
-        !C->node_arena()->contains(ctrl) ) // Unmatched old-space only
-      break;
-    i++;
-  }
-  Node *iff = NULL;
-  for( DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++ ) {
-    Node *x = ctrl->fast_out(j);
-    if( x->is_If() && x->req() > 1 &&
-        !C->node_arena()->contains(x) ) { // Unmatched old-space only
-      iff = x;
-      break;
-    }
-  }
-  if( !iff ) return false;
-  Node *bol = iff->in(1);
-  // The iff might be some random subclass of If or bol might be Con-Top
-  if (!bol->is_Bool())  return false;
-  assert( bol->req() > 1, "" );
-  return (bol->in(1)->Opcode() == Op_FastUnlock);
-}
-
 // Used by the DFA in dfa_xxx.cpp.  Check for a following barrier or
 // atomic instruction acting as a store_load barrier without any
 // intervening volatile load, and thus we don't need a barrier here.
--- a/src/share/vm/opto/matcher.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/matcher.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -351,7 +351,7 @@
   virtual int      regnum_to_fpu_offset(int regnum);
 
   // Is this branch offset small enough to be addressed by a short branch?
-  bool is_short_branch_offset(int rule, int offset);
+  bool is_short_branch_offset(int rule, int br_size, int offset);
 
   // Optional scaling for the parameter to the ClearArray/CopyArray node.
   static const bool init_array_count_is_in_bytes;
@@ -441,16 +441,6 @@
     else { fatal("SoftMatchFailure is not allowed except in product"); }
   }
 
-  // Used by the DFA in dfa_sparc.cpp.  Check for a prior FastLock
-  // acting as an Acquire and thus we don't need an Acquire here.  We
-  // retain the Node to act as a compiler ordering barrier.
-  static bool prior_fast_lock( const Node *acq );
-
-  // Used by the DFA in dfa_sparc.cpp.  Check for a following
-  // FastUnLock acting as a Release and thus we don't need a Release
-  // here.  We retain the Node to act as a compiler ordering barrier.
-  static bool post_fast_unlock( const Node *rel );
-
   // Check for a following volatile memory barrier without an
   // intervening load and thus we don't need a barrier here.  We
   // retain the Node to act as a compiler ordering barrier.
--- a/src/share/vm/opto/memnode.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/memnode.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -925,8 +925,9 @@
     // a synchronized region.
     while (current->is_Proj()) {
       int opc = current->in(0)->Opcode();
-      if ((final && opc == Op_MemBarAcquire) ||
-          opc == Op_MemBarRelease || opc == Op_MemBarCPUOrder) {
+      if ((final && (opc == Op_MemBarAcquire || opc == Op_MemBarAcquireLock)) ||
+          opc == Op_MemBarRelease || opc == Op_MemBarCPUOrder ||
+          opc == Op_MemBarReleaseLock) {
         Node* mem = current->in(0)->in(TypeFunc::Memory);
         if (mem->is_MergeMem()) {
           MergeMemNode* merge = mem->as_MergeMem();
@@ -1492,6 +1493,7 @@
   if (tp == NULL || tp->empty())  return Type::TOP;
   int off = tp->offset();
   assert(off != Type::OffsetTop, "case covered by TypePtr::empty");
+  Compile* C = phase->C;
 
   // Try to guess loaded type from pointer type
   if (tp->base() == Type::AryPtr) {
@@ -1535,7 +1537,7 @@
           Node* base = adr->in(AddPNode::Base);
           if (base != NULL &&
               !phase->type(base)->higher_equal(TypePtr::NULL_PTR)) {
-            Compile::AliasType* atp = phase->C->alias_type(base->adr_type());
+            Compile::AliasType* atp = C->alias_type(base->adr_type());
             if (is_autobox_cache(atp)) {
               return jt->join(TypePtr::NOTNULL)->is_ptr();
             }
@@ -1545,22 +1547,23 @@
       }
     }
   } else if (tp->base() == Type::InstPtr) {
+    ciEnv* env = C->env();
     const TypeInstPtr* tinst = tp->is_instptr();
     ciKlass* klass = tinst->klass();
     assert( off != Type::OffsetBot ||
             // arrays can be cast to Objects
             tp->is_oopptr()->klass()->is_java_lang_Object() ||
             // unsafe field access may not have a constant offset
-            phase->C->has_unsafe_access(),
+            C->has_unsafe_access(),
             "Field accesses must be precise" );
     // For oop loads, we expect the _type to be precise
-    if (klass == phase->C->env()->String_klass() &&
+    if (klass == env->String_klass() &&
         adr->is_AddP() && off != Type::OffsetBot) {
       // For constant Strings treat the final fields as compile time constants.
       Node* base = adr->in(AddPNode::Base);
       const TypeOopPtr* t = phase->type(base)->isa_oopptr();
       if (t != NULL && t->singleton()) {
-        ciField* field = phase->C->env()->String_klass()->get_field_by_offset(off, false);
+        ciField* field = env->String_klass()->get_field_by_offset(off, false);
         if (field != NULL && field->is_final()) {
           ciObject* string = t->const_oop();
           ciConstant constant = string->as_instance()->field_value(field);
@@ -1576,6 +1579,32 @@
         }
       }
     }
+    // Optimizations for constant objects
+    ciObject* const_oop = tinst->const_oop();
+    if (const_oop != NULL) {
+      // For constant CallSites treat the target field as a compile time constant.
+      if (const_oop->is_call_site()) {
+        ciCallSite* call_site = const_oop->as_call_site();
+        ciField* field = call_site->klass()->as_instance_klass()->get_field_by_offset(off, /*is_static=*/ false);
+        if (field != NULL && field->is_call_site_target()) {
+          ciMethodHandle* target = call_site->get_target();
+          if (target != NULL) {  // just in case
+            ciConstant constant(T_OBJECT, target);
+            const Type* t;
+            if (adr->bottom_type()->is_ptr_to_narrowoop()) {
+              t = TypeNarrowOop::make_from_constant(constant.as_object(), true);
+            } else {
+              t = TypeOopPtr::make_from_constant(constant.as_object(), true);
+            }
+            // Add a dependence for invalidation of the optimization.
+            if (!call_site->is_constant_call_site()) {
+              C->dependencies()->assert_call_site_target_value(call_site, target);
+            }
+            return t;
+          }
+        }
+      }
+    }
   } else if (tp->base() == Type::KlassPtr) {
     assert( off != Type::OffsetBot ||
             // arrays can be cast to Objects
@@ -2666,6 +2695,8 @@
   switch (opcode) {
   case Op_MemBarAcquire:   return new(C, len) MemBarAcquireNode(C,  atp, pn);
   case Op_MemBarRelease:   return new(C, len) MemBarReleaseNode(C,  atp, pn);
+  case Op_MemBarAcquireLock: return new(C, len) MemBarAcquireLockNode(C,  atp, pn);
+  case Op_MemBarReleaseLock: return new(C, len) MemBarReleaseLockNode(C,  atp, pn);
   case Op_MemBarVolatile:  return new(C, len) MemBarVolatileNode(C, atp, pn);
   case Op_MemBarCPUOrder:  return new(C, len) MemBarCPUOrderNode(C, atp, pn);
   case Op_Initialize:      return new(C, len) InitializeNode(C,     atp, pn);
--- a/src/share/vm/opto/memnode.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/memnode.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -879,7 +879,7 @@
 
 // "Acquire" - no following ref can move before (but earlier refs can
 // follow, like an early Load stalled in cache).  Requires multi-cpu
-// visibility.  Inserted after a volatile load or FastLock.
+// visibility.  Inserted after a volatile load.
 class MemBarAcquireNode: public MemBarNode {
 public:
   MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
@@ -889,7 +889,7 @@
 
 // "Release" - no earlier ref can move after (but later refs can move
 // up, like a speculative pipelined cache-hitting Load).  Requires
-// multi-cpu visibility.  Inserted before a volatile store or FastUnLock.
+// multi-cpu visibility.  Inserted before a volatile store.
 class MemBarReleaseNode: public MemBarNode {
 public:
   MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
@@ -897,6 +897,26 @@
   virtual int Opcode() const;
 };
 
+// "Acquire" - no following ref can move before (but earlier refs can
+// follow, like an early Load stalled in cache).  Requires multi-cpu
+// visibility.  Inserted after a FastLock.
+class MemBarAcquireLockNode: public MemBarNode {
+public:
+  MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
+    : MemBarNode(C, alias_idx, precedent) {}
+  virtual int Opcode() const;
+};
+
+// "Release" - no earlier ref can move after (but later refs can move
+// up, like a speculative pipelined cache-hitting Load).  Requires
+// multi-cpu visibility.  Inserted before a FastUnLock.
+class MemBarReleaseLockNode: public MemBarNode {
+public:
+  MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
+    : MemBarNode(C, alias_idx, precedent) {}
+  virtual int Opcode() const;
+};
+
 // Ordering between a volatile store and a following volatile load.
 // Requires multi-CPU visibility?
 class MemBarVolatileNode: public MemBarNode {
@@ -1258,6 +1278,16 @@
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return NotAMachineReg; }
   virtual uint match_edge(uint idx) const { return idx==2; }
+  virtual const Type *bottom_type() const { return Type::ABIO; }
+};
+
+// Allocation prefetch which may fault; the TLAB size has to be adjusted.
+class PrefetchAllocationNode : public Node {
+public:
+  PrefetchAllocationNode(Node *mem, Node *adr) : Node(0,mem,adr) {}
+  virtual int Opcode() const;
+  virtual uint ideal_reg() const { return NotAMachineReg; }
+  virtual uint match_edge(uint idx) const { return idx==2; }
   virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
 };
 
--- a/src/share/vm/opto/mulnode.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/mulnode.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -98,7 +98,7 @@
       const Type *t12 = phase->type( mul1->in(2) );
       if( t12->singleton() && t12 != Type::TOP) { // Left input is an add of a constant?
         // Compute new constant; check for overflow
-        const Type *tcon01 = mul1->as_Mul()->mul_ring(t2,t12);
+        const Type *tcon01 = ((MulNode*)mul1)->mul_ring(t2,t12);
         if( tcon01->singleton() ) {
           // The Mul of the flattened expression
           set_req(1, mul1->in(1));
--- a/src/share/vm/opto/mulnode.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/mulnode.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -41,9 +41,7 @@
 class MulNode : public Node {
   virtual uint hash() const;
 public:
-  MulNode( Node *in1, Node *in2 ): Node(0,in1,in2) {
-    init_class_id(Class_Mul);
-  }
+  MulNode( Node *in1, Node *in2 ): Node(0,in1,in2) {}
 
   // Handle algebraic identities here.  If we have an identity, return the Node
   // we are equivalent to.  We look for "add of zero" as an identity.
--- a/src/share/vm/opto/node.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/node.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -67,6 +67,8 @@
 class FastLockNode;
 class FastUnlockNode;
 class IfNode;
+class IfFalseNode;
+class IfTrueNode;
 class InitializeNode;
 class JVMState;
 class JumpNode;
@@ -75,6 +77,7 @@
 class LoadStoreNode;
 class LockNode;
 class LoopNode;
+class MachBranchNode;
 class MachCallDynamicJavaNode;
 class MachCallJavaNode;
 class MachCallLeafNode;
@@ -83,9 +86,11 @@
 class MachCallStaticJavaNode;
 class MachConstantBaseNode;
 class MachConstantNode;
+class MachGotoNode;
 class MachIfNode;
 class MachNode;
 class MachNullCheckNode;
+class MachProjNode;
 class MachReturnNode;
 class MachSafePointNode;
 class MachSpillCopyNode;
@@ -94,7 +99,6 @@
 class MemBarNode;
 class MemNode;
 class MergeMemNode;
-class MulNode;
 class MultiNode;
 class MultiBranchNode;
 class NeverBranchNode;
@@ -127,9 +131,10 @@
 class Type;
 class TypeNode;
 class UnlockNode;
+class VectorNode;
+class VectorLoadNode;
+class VectorStoreNode;
 class VectorSet;
-class IfTrueNode;
-class IfFalseNode;
 typedef void (*NFunc)(Node&,void*);
 extern "C" {
   typedef int (*C_sort_func_t)(const void *, const void *);
@@ -179,6 +184,8 @@
 // whenever I have phase-specific information.
 
 class Node {
+  friend class VMStructs;
+
   // Lots of restrictions on cloning Nodes
   Node(const Node&);            // not defined; linker error to use these
   Node &operator=(const Node &rhs);
@@ -568,31 +575,16 @@
               DEFINE_CLASS_ID(MachCallDynamicJava,  MachCallJava, 1)
             DEFINE_CLASS_ID(MachCallRuntime,      MachCall, 1)
               DEFINE_CLASS_ID(MachCallLeaf,         MachCallRuntime, 0)
-      DEFINE_CLASS_ID(MachSpillCopy,    Mach, 1)
-      DEFINE_CLASS_ID(MachNullCheck,    Mach, 2)
-      DEFINE_CLASS_ID(MachIf,           Mach, 3)
-      DEFINE_CLASS_ID(MachTemp,         Mach, 4)
-      DEFINE_CLASS_ID(MachConstantBase, Mach, 5)
-      DEFINE_CLASS_ID(MachConstant,     Mach, 6)
+      DEFINE_CLASS_ID(MachBranch, Mach, 1)
+        DEFINE_CLASS_ID(MachIf,         MachBranch, 0)
+        DEFINE_CLASS_ID(MachGoto,       MachBranch, 1)
+        DEFINE_CLASS_ID(MachNullCheck,  MachBranch, 2)
+      DEFINE_CLASS_ID(MachSpillCopy,    Mach, 2)
+      DEFINE_CLASS_ID(MachTemp,         Mach, 3)
+      DEFINE_CLASS_ID(MachConstantBase, Mach, 4)
+      DEFINE_CLASS_ID(MachConstant,     Mach, 5)
 
-    DEFINE_CLASS_ID(Proj,  Node, 2)
-      DEFINE_CLASS_ID(CatchProj, Proj, 0)
-      DEFINE_CLASS_ID(JumpProj,  Proj, 1)
-      DEFINE_CLASS_ID(IfTrue,    Proj, 2)
-      DEFINE_CLASS_ID(IfFalse,   Proj, 3)
-      DEFINE_CLASS_ID(Parm,      Proj, 4)
-
-    DEFINE_CLASS_ID(Region, Node, 3)
-      DEFINE_CLASS_ID(Loop, Region, 0)
-        DEFINE_CLASS_ID(Root,        Loop, 0)
-        DEFINE_CLASS_ID(CountedLoop, Loop, 1)
-
-    DEFINE_CLASS_ID(Sub,   Node, 4)
-      DEFINE_CLASS_ID(Cmp,   Sub, 0)
-        DEFINE_CLASS_ID(FastLock,   Cmp, 0)
-        DEFINE_CLASS_ID(FastUnlock, Cmp, 1)
-
-    DEFINE_CLASS_ID(Type,  Node, 5)
+    DEFINE_CLASS_ID(Type,  Node, 2)
       DEFINE_CLASS_ID(Phi,   Type, 0)
       DEFINE_CLASS_ID(ConstraintCast, Type, 1)
       DEFINE_CLASS_ID(CheckCastPP, Type, 2)
@@ -601,17 +593,37 @@
       DEFINE_CLASS_ID(DecodeN, Type, 5)
       DEFINE_CLASS_ID(EncodeP, Type, 6)
 
-    DEFINE_CLASS_ID(Mem,   Node, 6)
+    DEFINE_CLASS_ID(Proj,  Node, 3)
+      DEFINE_CLASS_ID(CatchProj, Proj, 0)
+      DEFINE_CLASS_ID(JumpProj,  Proj, 1)
+      DEFINE_CLASS_ID(IfTrue,    Proj, 2)
+      DEFINE_CLASS_ID(IfFalse,   Proj, 3)
+      DEFINE_CLASS_ID(Parm,      Proj, 4)
+      DEFINE_CLASS_ID(MachProj,  Proj, 5)
+
+    DEFINE_CLASS_ID(Mem,   Node, 4)
       DEFINE_CLASS_ID(Load,  Mem, 0)
+        DEFINE_CLASS_ID(VectorLoad,  Load, 0)
       DEFINE_CLASS_ID(Store, Mem, 1)
+        DEFINE_CLASS_ID(VectorStore, Store, 0)
       DEFINE_CLASS_ID(LoadStore, Mem, 2)
 
+    DEFINE_CLASS_ID(Region, Node, 5)
+      DEFINE_CLASS_ID(Loop, Region, 0)
+        DEFINE_CLASS_ID(Root,        Loop, 0)
+        DEFINE_CLASS_ID(CountedLoop, Loop, 1)
+
+    DEFINE_CLASS_ID(Sub,   Node, 6)
+      DEFINE_CLASS_ID(Cmp,   Sub, 0)
+        DEFINE_CLASS_ID(FastLock,   Cmp, 0)
+        DEFINE_CLASS_ID(FastUnlock, Cmp, 1)
+
     DEFINE_CLASS_ID(MergeMem, Node, 7)
     DEFINE_CLASS_ID(Bool,     Node, 8)
     DEFINE_CLASS_ID(AddP,     Node, 9)
     DEFINE_CLASS_ID(BoxLock,  Node, 10)
     DEFINE_CLASS_ID(Add,      Node, 11)
-    DEFINE_CLASS_ID(Mul,      Node, 12)
+    DEFINE_CLASS_ID(Vector,   Node, 12)
     DEFINE_CLASS_ID(ClearArray, Node, 13)
 
     _max_classes  = ClassMask_ClearArray
@@ -621,21 +633,15 @@
   // Flags are sorted by usage frequency.
   enum NodeFlags {
     Flag_is_Copy             = 0x01, // should be first bit to avoid shift
-    Flag_is_Call             = Flag_is_Copy << 1,
-    Flag_rematerialize       = Flag_is_Call << 1,
+    Flag_rematerialize       = Flag_is_Copy << 1,
     Flag_needs_anti_dependence_check = Flag_rematerialize << 1,
     Flag_is_macro            = Flag_needs_anti_dependence_check << 1,
     Flag_is_Con              = Flag_is_macro << 1,
     Flag_is_cisc_alternate   = Flag_is_Con << 1,
-    Flag_is_Branch           = Flag_is_cisc_alternate << 1,
-    Flag_is_block_start      = Flag_is_Branch << 1,
-    Flag_is_Goto             = Flag_is_block_start << 1,
-    Flag_is_dead_loop_safe   = Flag_is_Goto << 1,
+    Flag_is_dead_loop_safe   = Flag_is_cisc_alternate << 1,
     Flag_may_be_short_branch = Flag_is_dead_loop_safe << 1,
-    Flag_is_safepoint_node   = Flag_may_be_short_branch << 1,
-    Flag_is_pc_relative      = Flag_is_safepoint_node << 1,
-    Flag_is_Vector           = Flag_is_pc_relative << 1,
-    _max_flags = (Flag_is_Vector << 1) - 1 // allow flags combination
+    Flag_avoid_back_to_back  = Flag_may_be_short_branch << 1,
+    _max_flags = (Flag_avoid_back_to_back << 1) - 1 // allow flags combination
   };
 
 private:
@@ -669,21 +675,6 @@
   virtual uint size_of() const;
 
   // Other interesting Node properties
-
-  // Special case: is_Call() returns true for both CallNode and MachCallNode.
-  bool is_Call() const {
-    return (_flags & Flag_is_Call) != 0;
-  }
-
-  CallNode* isa_Call() const {
-    return is_Call() ? as_Call() : NULL;
-  }
-
-  CallNode *as_Call() const { // Only for CallNode (not for MachCallNode)
-    assert((_class_id & ClassMask_Call) == Class_Call, "invalid node class");
-    return (CallNode*)this;
-  }
-
   #define DEFINE_CLASS_QUERY(type)                           \
   bool is_##type() const {                                   \
     return ((_class_id & ClassMask_##type) == Class_##type); \
@@ -703,6 +694,7 @@
   DEFINE_CLASS_QUERY(AllocateArray)
   DEFINE_CLASS_QUERY(Bool)
   DEFINE_CLASS_QUERY(BoxLock)
+  DEFINE_CLASS_QUERY(Call)
   DEFINE_CLASS_QUERY(CallDynamicJava)
   DEFINE_CLASS_QUERY(CallJava)
   DEFINE_CLASS_QUERY(CallLeaf)
@@ -732,6 +724,7 @@
   DEFINE_CLASS_QUERY(Lock)
   DEFINE_CLASS_QUERY(Loop)
   DEFINE_CLASS_QUERY(Mach)
+  DEFINE_CLASS_QUERY(MachBranch)
   DEFINE_CLASS_QUERY(MachCall)
   DEFINE_CLASS_QUERY(MachCallDynamicJava)
   DEFINE_CLASS_QUERY(MachCallJava)
@@ -740,8 +733,10 @@
   DEFINE_CLASS_QUERY(MachCallStaticJava)
   DEFINE_CLASS_QUERY(MachConstantBase)
   DEFINE_CLASS_QUERY(MachConstant)
+  DEFINE_CLASS_QUERY(MachGoto)
   DEFINE_CLASS_QUERY(MachIf)
   DEFINE_CLASS_QUERY(MachNullCheck)
+  DEFINE_CLASS_QUERY(MachProj)
   DEFINE_CLASS_QUERY(MachReturn)
   DEFINE_CLASS_QUERY(MachSafePoint)
   DEFINE_CLASS_QUERY(MachSpillCopy)
@@ -749,7 +744,6 @@
   DEFINE_CLASS_QUERY(Mem)
   DEFINE_CLASS_QUERY(MemBar)
   DEFINE_CLASS_QUERY(MergeMem)
-  DEFINE_CLASS_QUERY(Mul)
   DEFINE_CLASS_QUERY(Multi)
   DEFINE_CLASS_QUERY(MultiBranch)
   DEFINE_CLASS_QUERY(Parm)
@@ -764,6 +758,9 @@
   DEFINE_CLASS_QUERY(Store)
   DEFINE_CLASS_QUERY(Sub)
   DEFINE_CLASS_QUERY(Type)
+  DEFINE_CLASS_QUERY(Vector)
+  DEFINE_CLASS_QUERY(VectorLoad)
+  DEFINE_CLASS_QUERY(VectorStore)
   DEFINE_CLASS_QUERY(Unlock)
 
   #undef DEFINE_CLASS_QUERY
@@ -774,7 +771,6 @@
   }
 
   bool is_Con () const { return (_flags & Flag_is_Con) != 0; }
-  bool is_Goto() const { return (_flags & Flag_is_Goto) != 0; }
   // The data node which is safe to leave in dead loop during IGVN optimization.
   bool is_dead_loop_safe() const {
     return is_Phi() || (is_Proj() && in(0) == NULL) ||
@@ -795,9 +791,6 @@
   // skip some other important test.)
   virtual bool depends_only_on_test() const { assert(!is_CFG(), ""); return true; };
 
-  // defined for MachNodes that match 'If' | 'Goto' | 'CountedLoopEnd'
-  bool is_Branch() const { return (_flags & Flag_is_Branch) != 0; }
-
   // When building basic blocks, I need to have a notion of block beginning
   // Nodes, next block selector Nodes (block enders), and next block
   // projections.  These calls need to work on their machine equivalents.  The
@@ -806,7 +799,7 @@
     if ( is_Region() )
       return this == (const Node*)in(0);
     else
-      return (_flags & Flag_is_block_start) != 0;
+      return is_Start();
   }
 
   // The Ideal control projection Nodes are IfTrue/IfFalse, JumpProjNode, Root,
@@ -816,9 +809,6 @@
   // The node is a "macro" node which needs to be expanded before matching
   bool is_macro() const { return (_flags & Flag_is_macro) != 0; }
 
-  // Value is a vector of primitive values
-  bool is_Vector() const { return (_flags & Flag_is_Vector) != 0; }
-
 //----------------- Optimization
 
   // Get the worst-case Type output for this Node.
@@ -1298,6 +1288,7 @@
 // Note that the constructor just zeros things, and since I use Arena
 // allocation I do not need a destructor to reclaim storage.
 class Node_Array : public ResourceObj {
+  friend class VMStructs;
 protected:
   Arena *_a;                    // Arena to allocate in
   uint   _max;
@@ -1328,6 +1319,7 @@
 };
 
 class Node_List : public Node_Array {
+  friend class VMStructs;
   uint _cnt;
 public:
   Node_List() : Node_Array(Thread::current()->resource_area()), _cnt(0) {}
@@ -1351,6 +1343,7 @@
 
 //------------------------------Unique_Node_List-------------------------------
 class Unique_Node_List : public Node_List {
+  friend class VMStructs;
   VectorSet _in_worklist;
   uint _clock_index;            // Index in list where to pop from next
 public:
@@ -1401,6 +1394,7 @@
 
 //------------------------------Node_Stack-------------------------------------
 class Node_Stack {
+  friend class VMStructs;
 protected:
   struct INode {
     Node *node; // Processed node
@@ -1473,6 +1467,7 @@
 // Debugging or profiling annotations loosely and sparsely associated
 // with some nodes.  See Compile::node_notes_at for the accessor.
 class Node_Notes VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
   JVMState* _jvms;
 
 public:
--- a/src/share/vm/opto/optoreg.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/optoreg.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -172,6 +172,7 @@
 // and converting that will return OptoReg::Bad losing the identity of the OptoReg.
 
 class OptoRegPair {
+  friend class VMStructs;
 private:
   short _second;
   short _first;
--- a/src/share/vm/opto/output.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/output.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -128,6 +128,14 @@
   if ( ZapDeadCompiledLocals )  Insert_zap_nodes();
 # endif
 
+  uint* blk_starts = NEW_RESOURCE_ARRAY(uint,_cfg->_num_blocks+1);
+  blk_starts[0]    = 0;
+
+  // Initialize code buffer and process short branches.
+  CodeBuffer* cb = init_buffer(blk_starts);
+
+  if (cb == NULL || failing())  return;
+
   ScheduleAndBundle();
 
 #ifndef PRODUCT
@@ -152,7 +160,7 @@
 
   if (failing())  return;
 
-  Fill_buffer();
+  fill_buffer(cb, blk_starts);
 }
 
 bool Compile::need_stack_bang(int frame_size_in_bytes) const {
@@ -325,22 +333,22 @@
   } // if( MaxLoopPad < OptoLoopAlignment-1 )
 }
 
-//----------------------Shorten_branches---------------------------------------
+//----------------------shorten_branches---------------------------------------
 // The architecture description provides short branch variants for some long
 // branch instructions. Replace eligible long branches with short branches.
-void Compile::Shorten_branches(Label *labels, int& code_size, int& reloc_size, int& stub_size) {
-
-  // fill in the nop array for bundling computations
-  MachNode *_nop_list[Bundle::_nop_count];
-  Bundle::initialize_nops(_nop_list, this);
+void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size, int& stub_size) {
 
   // ------------------
   // Compute size of each block, method size, and relocation information size
-  uint *jmp_end    = NEW_RESOURCE_ARRAY(uint,_cfg->_num_blocks);
-  uint *blk_starts = NEW_RESOURCE_ARRAY(uint,_cfg->_num_blocks+1);
-  DEBUG_ONLY( uint *jmp_target = NEW_RESOURCE_ARRAY(uint,_cfg->_num_blocks); )
-  DEBUG_ONLY( uint *jmp_rule = NEW_RESOURCE_ARRAY(uint,_cfg->_num_blocks); )
-  blk_starts[0]    = 0;
+  uint nblocks  = _cfg->_num_blocks;
+
+  uint*      jmp_offset = NEW_RESOURCE_ARRAY(uint,nblocks);
+  uint*      jmp_size   = NEW_RESOURCE_ARRAY(uint,nblocks);
+  int*       jmp_nidx   = NEW_RESOURCE_ARRAY(int ,nblocks);
+  DEBUG_ONLY( uint *jmp_target = NEW_RESOURCE_ARRAY(uint,nblocks); )
+  DEBUG_ONLY( uint *jmp_rule = NEW_RESOURCE_ARRAY(uint,nblocks); )
+
+  bool has_short_branch_candidate = false;
 
   // Initialize the sizes to 0
   code_size  = 0;          // Size in bytes of generated code
@@ -350,28 +358,34 @@
   reloc_size = 1;          // Number of relocation entries
 
   // Make three passes.  The first computes pessimistic blk_starts,
-  // relative jmp_end and reloc_size information.  The second performs
+  // relative jmp_offset and reloc_size information.  The second performs
   // short branch substitution using the pessimistic sizing.  The
   // third inserts nops where needed.
 
-  Node *nj; // tmp
-
   // Step one, perform a pessimistic sizing pass.
-  uint i;
-  uint min_offset_from_last_call = 1;  // init to a positive value
+  uint last_call_adr = max_uint;
+  uint last_avoid_back_to_back_adr = max_uint;
   uint nop_size = (new (this) MachNopNode())->size(_regalloc);
-  for( i=0; i<_cfg->_num_blocks; i++ ) { // For all blocks
+  for (uint i = 0; i < nblocks; i++) { // For all blocks
     Block *b = _cfg->_blocks[i];
 
+    // During short branch replacement, we store the relative (to blk_starts)
+    // offset of jump in jmp_offset, rather than the absolute offset of jump.
+    // This is so that we do not need to recompute sizes of all nodes when
+    // we compute correct blk_starts in our next sizing pass.
+    jmp_offset[i] = 0;
+    jmp_size[i]   = 0;
+    jmp_nidx[i]   = -1;
+    DEBUG_ONLY( jmp_target[i] = 0; )
+    DEBUG_ONLY( jmp_rule[i]   = 0; )
+
     // Sum all instruction sizes to compute block size
     uint last_inst = b->_nodes.size();
     uint blk_size = 0;
-    for( uint j = 0; j<last_inst; j++ ) {
-      nj = b->_nodes[j];
-      uint inst_size = nj->size(_regalloc);
-      blk_size += inst_size;
+    for (uint j = 0; j < last_inst; j++) {
+      Node* nj = b->_nodes[j];
       // Handle machine instruction nodes
-      if( nj->is_Mach() ) {
+      if (nj->is_Mach()) {
         MachNode *mach = nj->as_Mach();
         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
         reloc_size += mach->reloc();
@@ -388,32 +402,52 @@
         } else if (mach->is_MachSafePoint()) {
           // If call/safepoint are adjacent, account for possible
           // nop to disambiguate the two safepoints.
-          if (min_offset_from_last_call == 0) {
+          // ScheduleAndBundle() can rearrange nodes in a block,
+          // so check all offsets inside this block.
+          if (last_call_adr >= blk_starts[i]) {
+            blk_size += nop_size;
+          }
+        }
+        if (mach->avoid_back_to_back()) {
+          // A nop is inserted between "avoid back to back" instructions.
+          // ScheduleAndBundle() can rearrange nodes in a block,
+          // so check all offsets inside this block.
+          if (last_avoid_back_to_back_adr >= blk_starts[i]) {
             blk_size += nop_size;
           }
         }
+        if (mach->may_be_short_branch()) {
+          if (!nj->is_MachBranch()) {
+#ifndef PRODUCT
+            nj->dump(3);
+#endif
+            Unimplemented();
+          }
+          assert(jmp_nidx[i] == -1, "block should have only one branch");
+          jmp_offset[i] = blk_size;
+          jmp_size[i]   = nj->size(_regalloc);
+          jmp_nidx[i]   = j;
+          has_short_branch_candidate = true;
+        }
       }
-      min_offset_from_last_call += inst_size;
+      blk_size += nj->size(_regalloc);
       // Remember end of call offset
-      if (nj->is_MachCall() && nj->as_MachCall()->is_safepoint_node()) {
-        min_offset_from_last_call = 0;
+      if (nj->is_MachCall() && !nj->is_MachCallLeaf()) {
+        last_call_adr = blk_starts[i]+blk_size;
+      }
+      // Remember end of avoid_back_to_back offset
+      if (nj->is_Mach() && nj->as_Mach()->avoid_back_to_back()) {
+        last_avoid_back_to_back_adr = blk_starts[i]+blk_size;
       }
     }
 
-    // During short branch replacement, we store the relative (to blk_starts)
-    // end of jump in jmp_end, rather than the absolute end of jump.  This
-    // is so that we do not need to recompute sizes of all nodes when we compute
-    // correct blk_starts in our next sizing pass.
-    jmp_end[i] = blk_size;
-    DEBUG_ONLY( jmp_target[i] = 0; )
-
     // When the next block starts a loop, we may insert pad NOP
     // instructions.  Since we cannot know our future alignment,
     // assume the worst.
-    if( i<_cfg->_num_blocks-1 ) {
+    if (i < nblocks-1) {
       Block *nb = _cfg->_blocks[i+1];
       int max_loop_pad = nb->code_alignment()-relocInfo::addr_unit();
-      if( max_loop_pad > 0 ) {
+      if (max_loop_pad > 0) {
         assert(is_power_of_2(max_loop_pad+relocInfo::addr_unit()), "");
         blk_size += max_loop_pad;
       }
@@ -424,124 +458,100 @@
   }
 
   // Step two, replace eligible long jumps.
-
-  // Note: this will only get the long branches within short branch
-  //   range. Another pass might detect more branches that became
-  //   candidates because the shortening in the first pass exposed
-  //   more opportunities. Unfortunately, this would require
-  //   recomputing the starting and ending positions for the blocks
-  for( i=0; i<_cfg->_num_blocks; i++ ) {
-    Block *b = _cfg->_blocks[i];
-
-    int j;
-    // Find the branch; ignore trailing NOPs.
-    for( j = b->_nodes.size()-1; j>=0; j-- ) {
-      nj = b->_nodes[j];
-      if( !nj->is_Mach() || nj->as_Mach()->ideal_Opcode() != Op_Con )
-        break;
-    }
-
-    if (j >= 0) {
-      if( nj->is_Mach() && nj->as_Mach()->may_be_short_branch() ) {
-        MachNode *mach = nj->as_Mach();
+  bool progress = true;
+  uint last_may_be_short_branch_adr = max_uint;
+  while (has_short_branch_candidate && progress) {
+    progress = false;
+    has_short_branch_candidate = false;
+    int adjust_block_start = 0;
+    for (uint i = 0; i < nblocks; i++) {
+      Block *b = _cfg->_blocks[i];
+      int idx = jmp_nidx[i];
+      MachNode* mach = (idx == -1) ? NULL: b->_nodes[idx]->as_Mach();
+      if (mach != NULL && mach->may_be_short_branch()) {
+#ifdef ASSERT
+        assert(jmp_size[i] > 0 && mach->is_MachBranch(), "sanity");
+        int j;
+        // Find the branch; ignore trailing NOPs.
+        for (j = b->_nodes.size()-1; j>=0; j--) {
+          Node* n = b->_nodes[j];
+          if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con)
+            break;
+        }
+        assert(j >= 0 && j == idx && b->_nodes[j] == (Node*)mach, "sanity");
+#endif
+        int br_size = jmp_size[i];
+        int br_offs = blk_starts[i] + jmp_offset[i];
+
         // This requires the TRUE branch target be in succs[0]
         uint bnum = b->non_connector_successor(0)->_pre_order;
-        uintptr_t target = blk_starts[bnum];
-        if( mach->is_pc_relative() ) {
-          int offset = target-(blk_starts[i] + jmp_end[i]);
-          if (_matcher->is_short_branch_offset(mach->rule(), offset)) {
-            // We've got a winner.  Replace this branch.
-            MachNode* replacement = mach->short_branch_version(this);
-            b->_nodes.map(j, replacement);
-            mach->subsume_by(replacement);
-
-            // Update the jmp_end size to save time in our
-            // next pass.
-            jmp_end[i] -= (mach->size(_regalloc) - replacement->size(_regalloc));
-            DEBUG_ONLY( jmp_target[i] = bnum; );
-            DEBUG_ONLY( jmp_rule[i] = mach->rule(); );
+        int offset = blk_starts[bnum] - br_offs;
+        if (bnum > i) { // adjust following block's offset
+          offset -= adjust_block_start;
+        }
+        // In the following code a nop could be inserted before
+        // the branch, which will increase the backward distance.
+        bool needs_padding = ((uint)br_offs == last_may_be_short_branch_adr);
+        if (needs_padding && offset <= 0)
+          offset -= nop_size;
+
+        if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
+          // We've got a winner.  Replace this branch.
+          MachNode* replacement = mach->as_MachBranch()->short_branch_version(this);
+
+          // Update the jmp_size.
+          int new_size = replacement->size(_regalloc);
+          int diff     = br_size - new_size;
+          assert(diff >= (int)nop_size, "short_branch size should be smaller");
+          // Conservatively take into account padding between
+          // avoid_back_to_back branches. The previous branch could be
+          // converted into an avoid_back_to_back branch during later
+          // rounds.
+          if (needs_padding && replacement->avoid_back_to_back()) {
+            jmp_offset[i] += nop_size;
+            diff -= nop_size;
           }
+          adjust_block_start += diff;
+          b->_nodes.map(idx, replacement);
+          mach->subsume_by(replacement);
+          mach = replacement;
+          progress = true;
+
+          jmp_size[i] = new_size;
+          DEBUG_ONLY( jmp_target[i] = bnum; );
+          DEBUG_ONLY( jmp_rule[i] = mach->rule(); );
         } else {
-#ifndef PRODUCT
-          mach->dump(3);
-#endif
-          Unimplemented();
+          // The jump distance is not short; try again during the next iteration.
+          has_short_branch_candidate = true;
         }
+      } // (mach->may_be_short_branch())
+      if (mach != NULL && (mach->may_be_short_branch() ||
+                           mach->avoid_back_to_back())) {
+        last_may_be_short_branch_adr = blk_starts[i] + jmp_offset[i] + jmp_size[i];
       }
-    }
-  }
-
-  // Compute the size of first NumberOfLoopInstrToAlign instructions at head
-  // of a loop. It is used to determine the padding for loop alignment.
-  compute_loop_first_inst_sizes();
-
-  // Step 3, compute the offsets of all the labels
-  uint last_call_adr = max_uint;
-  for( i=0; i<_cfg->_num_blocks; i++ ) { // For all blocks
-    // copy the offset of the beginning to the corresponding label
-    assert(labels[i].is_unused(), "cannot patch at this point");
-    labels[i].bind_loc(blk_starts[i], CodeBuffer::SECT_INSTS);
-
-    // insert padding for any instructions that need it
-    Block *b = _cfg->_blocks[i];
-    uint last_inst = b->_nodes.size();
-    uint adr = blk_starts[i];
-    for( uint j = 0; j<last_inst; j++ ) {
-      nj = b->_nodes[j];
-      if( nj->is_Mach() ) {
-        int padding = nj->as_Mach()->compute_padding(adr);
-        // If call/safepoint are adjacent insert a nop (5010568)
-        if (padding == 0 && nj->is_MachSafePoint() && !nj->is_MachCall() &&
-            adr == last_call_adr ) {
-          padding = nop_size;
-        }
-        if(padding > 0) {
-          assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
-          int nops_cnt = padding / nop_size;
-          MachNode *nop = new (this) MachNopNode(nops_cnt);
-          b->_nodes.insert(j++, nop);
-          _cfg->_bbs.map( nop->_idx, b );
-          adr += padding;
-          last_inst++;
-        }
-      }
-      adr += nj->size(_regalloc);
-
-      // Remember end of call offset
-      if (nj->is_MachCall() && nj->as_MachCall()->is_safepoint_node()) {
-        last_call_adr = adr;
-      }
-    }
-
-    if ( i != _cfg->_num_blocks-1) {
-      // Get the size of the block
-      uint blk_size = adr - blk_starts[i];
-
-      // When the next block is the top of a loop, we may insert pad NOP
-      // instructions.
-      Block *nb = _cfg->_blocks[i+1];
-      int current_offset = blk_starts[i] + blk_size;
-      current_offset += nb->alignment_padding(current_offset);
-      // Save block size; update total method size
-      blk_starts[i+1] = current_offset;
+      blk_starts[i+1] -= adjust_block_start;
     }
   }
 
 #ifdef ASSERT
-  for( i=0; i<_cfg->_num_blocks; i++ ) { // For all blocks
-    if( jmp_target[i] != 0 ) {
-      int offset = blk_starts[jmp_target[i]]-(blk_starts[i] + jmp_end[i]);
-      if (!_matcher->is_short_branch_offset(jmp_rule[i], offset)) {
-        tty->print_cr("target (%d) - jmp_end(%d) = offset (%d), jmp_block B%d, target_block B%d", blk_starts[jmp_target[i]], blk_starts[i] + jmp_end[i], offset, i, jmp_target[i]);
+  for (uint i = 0; i < nblocks; i++) { // For all blocks
+    if (jmp_target[i] != 0) {
+      int br_size = jmp_size[i];
+      int offset = blk_starts[jmp_target[i]]-(blk_starts[i] + jmp_offset[i]);
+      if (!_matcher->is_short_branch_offset(jmp_rule[i], br_size, offset)) {
+        tty->print_cr("target (%d) - jmp_offset(%d) = offset (%d), jump_size(%d), jmp_block B%d, target_block B%d", blk_starts[jmp_target[i]], blk_starts[i] + jmp_offset[i], offset, br_size, i, jmp_target[i]);
       }
-      assert(_matcher->is_short_branch_offset(jmp_rule[i], offset), "Displacement too large for short jmp");
+      assert(_matcher->is_short_branch_offset(jmp_rule[i], br_size, offset), "Displacement too large for short jmp");
     }
   }
 #endif
 
+  // Step 3, computing the offsets of all blocks, will be done in fill_buffer()
+  // after ScheduleAndBundle().
+
   // ------------------
   // Compute size for code buffer
-  code_size   = blk_starts[i-1] + jmp_end[i-1];
+  code_size = blk_starts[nblocks];
 
   // Relocation records
   reloc_size += 1;              // Relo entry for exception handler
@@ -550,7 +560,7 @@
   // Min is 2 bytes, max is probably 6 or 8, with a tax up to 25% for
   // a relocation index.
   // The CodeBuffer will expand the locs array if this estimate is too low.
-  reloc_size   *= 10 / sizeof(relocInfo);
+  reloc_size *= 10 / sizeof(relocInfo);
 }
 
 //------------------------------FillLocArray-----------------------------------
@@ -1026,7 +1036,7 @@
 
 
 
-// helper for Fill_buffer bailout logic
+// helper for fill_buffer bailout logic
 static void turn_off_compiler(Compile* C) {
   if (CodeCache::largest_free_block() >= CodeCacheMinimumFreeSpace*10) {
     // Do not turn off compilation if a single giant method has
@@ -1039,22 +1049,20 @@
 }
 
 
-//------------------------------Fill_buffer------------------------------------
-void Compile::Fill_buffer() {
+//------------------------------init_buffer------------------------------------
+CodeBuffer* Compile::init_buffer(uint* blk_starts) {
 
   // Set the initially allocated size
   int  code_req   = initial_code_capacity;
   int  locs_req   = initial_locs_capacity;
   int  stub_req   = TraceJumps ? initial_stub_capacity * 10 : initial_stub_capacity;
   int  const_req  = initial_const_capacity;
-  bool labels_not_set = true;
 
   int  pad_req    = NativeCall::instruction_size;
   // The extra spacing after the code is necessary on some platforms.
   // Sometimes we need to patch in a jump after the last instruction,
   // if the nmethod has been deoptimized.  (See 4932387, 4894843.)
 
-  uint i;
   // Compute the byte offset where we can store the deopt pc.
   if (fixed_slots() != 0) {
     _orig_pc_slot_offset_in_bytes = _regalloc->reg2offset(OptoReg::stack2reg(_orig_pc_slot));
@@ -1078,19 +1086,12 @@
     _frame_slots += 8*(16/BytesPerInt);
   }
 #endif
-  assert( _frame_slots >= 0 && _frame_slots < 1000000, "sanity check" );
-
-  // Create an array of unused labels, one for each basic block
-  Label *blk_labels = NEW_RESOURCE_ARRAY(Label, _cfg->_num_blocks+1);
-
-  for( i=0; i <= _cfg->_num_blocks; i++ ) {
-    blk_labels[i].init();
-  }
+  assert(_frame_slots >= 0 && _frame_slots < 1000000, "sanity check");
 
   if (has_mach_constant_base_node()) {
     // Fill the constant table.
-    // Note:  This must happen before Shorten_branches.
-    for (i = 0; i < _cfg->_num_blocks; i++) {
+    // Note:  This must happen before shorten_branches.
+    for (uint i = 0; i < _cfg->_num_blocks; i++) {
       Block* b = _cfg->_blocks[i];
 
       for (uint j = 0; j < b->_nodes.size(); j++) {
@@ -1114,14 +1115,11 @@
   // Initialize the space for the BufferBlob used to find and verify
   // instruction size in MachNode::emit_size()
   init_scratch_buffer_blob(const_req);
-  if (failing())  return; // Out of memory
-
-  // If this machine supports different size branch offsets, then pre-compute
-  // the length of the blocks
-  if( _matcher->is_short_branch_offset(-1, 0) ) {
-    Shorten_branches(blk_labels, code_req, locs_req, stub_req);
-    labels_not_set = false;
-  }
+  if (failing())  return NULL; // Out of memory
+
+  // Pre-compute the length of blocks and replace
+  // long branches with short if machine supports it.
+  shorten_branches(blk_starts, code_req, locs_req, stub_req);
 
   // nmethod and CodeBuffer count stubs & constants as part of method's code.
   int exception_handler_req = size_exception_handler();
@@ -1151,7 +1149,7 @@
   // Have we run out of code space?
   if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
     turn_off_compiler(this);
-    return;
+    return NULL;
   }
   // Configure the code buffer.
   cb->initialize_consts_size(const_req);
@@ -1162,18 +1160,31 @@
   MachNode *_nop_list[Bundle::_nop_count];
   Bundle::initialize_nops(_nop_list, this);
 
+  return cb;
+}
+
+//------------------------------fill_buffer------------------------------------
+void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
+  // blk_starts[] contains offsets calculated during short branch processing;
+  // offsets should not be increased during the following steps.
+
+  // Compute the size of first NumberOfLoopInstrToAlign instructions at head
+  // of a loop. It is used to determine the padding for loop alignment.
+  compute_loop_first_inst_sizes();
+
   // Create oopmap set.
   _oop_map_set = new OopMapSet();
 
   // !!!!! This preserves old handling of oopmaps for now
   debug_info()->set_oopmaps(_oop_map_set);
 
+  uint nblocks  = _cfg->_num_blocks;
   // Count and start of implicit null check instructions
   uint inct_cnt = 0;
-  uint *inct_starts = NEW_RESOURCE_ARRAY(uint, _cfg->_num_blocks+1);
+  uint *inct_starts = NEW_RESOURCE_ARRAY(uint, nblocks+1);
 
   // Count and start of calls
-  uint *call_returns = NEW_RESOURCE_ARRAY(uint, _cfg->_num_blocks+1);
+  uint *call_returns = NEW_RESOURCE_ARRAY(uint, nblocks+1);
 
   uint  return_offset = 0;
   int nop_size = (new (this) MachNopNode())->size(_regalloc);
@@ -1181,14 +1192,22 @@
   int previous_offset = 0;
   int current_offset  = 0;
   int last_call_offset = -1;
+  int last_avoid_back_to_back_offset = -1;
+#ifdef ASSERT
+  int block_alignment_padding = 0;
+
+  uint* jmp_target = NEW_RESOURCE_ARRAY(uint,nblocks);
+  uint* jmp_offset = NEW_RESOURCE_ARRAY(uint,nblocks);
+  uint* jmp_size   = NEW_RESOURCE_ARRAY(uint,nblocks);
+  uint* jmp_rule   = NEW_RESOURCE_ARRAY(uint,nblocks);
+#endif
 
   // Create an array of unused labels, one for each basic block, if printing is enabled
 #ifndef PRODUCT
   int *node_offsets      = NULL;
-  uint  node_offset_limit = unique();
-
-
-  if ( print_assembly() )
+  uint node_offset_limit = unique();
+
+  if (print_assembly())
     node_offsets         = NEW_RESOURCE_ARRAY(int, node_offset_limit);
 #endif
 
@@ -1199,11 +1218,19 @@
     constant_table().emit(*cb);
   }
 
+  // Create an array of labels, one for each basic block
+  Label *blk_labels = NEW_RESOURCE_ARRAY(Label, nblocks+1);
+  for (uint i=0; i <= nblocks; i++) {
+    blk_labels[i].init();
+  }
+
   // ------------------
   // Now fill in the code buffer
   Node *delay_slot = NULL;
 
-  for( i=0; i < _cfg->_num_blocks; i++ ) {
+  for (uint i=0; i < nblocks; i++) {
+    guarantee(blk_starts[i] >= (uint)cb->insts_size(), "should not increase size");
+
     Block *b = _cfg->_blocks[i];
 
     Node *head = b->head();
@@ -1211,23 +1238,38 @@
     // If this block needs to start aligned (i.e, can be reached other
     // than by falling-thru from the previous block), then force the
     // start of a new bundle.
-    if( Pipeline::requires_bundling() && starts_bundle(head) )
+    if (Pipeline::requires_bundling() && starts_bundle(head))
       cb->flush_bundle(true);
 
+#ifdef ASSERT
+    if (!b->is_connector()) {
+      stringStream st;
+      b->dump_head(&_cfg->_bbs, &st);
+      MacroAssembler(cb).block_comment(st.as_string());
+    }
+    jmp_target[i] = 0;
+    jmp_offset[i] = 0;
+    jmp_size[i]   = 0;
+    jmp_rule[i]   = 0;
+
+    // The maximum alignment padding for a loop block was used
+    // during the first round of branch shortening; as a result,
+    // padding for nodes (sfpt after call) was not added.
+    // Take this into account for the block size change check
+    // and allow the block size to grow by the difference
+    // between the maximum and actual alignment paddings.
+    int orig_blk_size = blk_starts[i+1] - blk_starts[i] + block_alignment_padding;
+#endif
+    int blk_offset = current_offset;
+
     // Define the label at the beginning of the basic block
-    if (labels_not_set) {
-      MacroAssembler(cb).bind(blk_labels[b->_pre_order]);
-    } else {
-      assert(blk_labels[b->_pre_order].loc_pos() == cb->insts_size(),
-             err_msg("label position does not match code offset: %d != %d",
-                     blk_labels[b->_pre_order].loc_pos(), cb->insts_size()));
-    }
+    MacroAssembler(cb).bind(blk_labels[b->_pre_order]);
 
     uint last_inst = b->_nodes.size();
 
     // Emit block normally, except for last instruction.
     // Emit means "dump code bits into code buffer".
-    for( uint j = 0; j<last_inst; j++ ) {
+    for (uint j = 0; j<last_inst; j++) {
 
       // Get the node
       Node* n = b->_nodes[j];
@@ -1244,7 +1286,7 @@
 
       // If this starts a new instruction group, then flush the current one
       // (but allow split bundles)
-      if( Pipeline::requires_bundling() && starts_bundle(n) )
+      if (Pipeline::requires_bundling() && starts_bundle(n))
         cb->flush_bundle(false);
 
       // The following logic is duplicated in the code ifdeffed for
@@ -1253,25 +1295,32 @@
 
       // Special handling for SafePoint/Call Nodes
       bool is_mcall = false;
-      if( n->is_Mach() ) {
+      if (n->is_Mach()) {
         MachNode *mach = n->as_Mach();
         is_mcall = n->is_MachCall();
         bool is_sfn = n->is_MachSafePoint();
 
         // If this requires all previous instructions be flushed, then do so
-        if( is_sfn || is_mcall || mach->alignment_required() != 1) {
+        if (is_sfn || is_mcall || mach->alignment_required() != 1) {
           cb->flush_bundle(true);
           current_offset = cb->insts_size();
         }
 
+        // Padding may be needed again since a previous instruction
+        // could have been moved into a delay slot.
+
         // align the instruction if necessary
         int padding = mach->compute_padding(current_offset);
         // Make sure safepoint node for polling is distinct from a call's
         // return by adding a nop if needed.
-        if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset ) {
+        if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset) {
           padding = nop_size;
         }
-        assert( labels_not_set || padding == 0, "instruction should already be aligned");
+        if (padding == 0 && mach->avoid_back_to_back() &&
+            current_offset == last_avoid_back_to_back_offset) {
+          // Insert a nop to keep avoid_back_to_back instructions apart.
+          padding = nop_size;
+        }
 
         if(padding > 0) {
           assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
@@ -1295,20 +1344,20 @@
           // Save the return address
           call_returns[b->_pre_order] = current_offset + mcall->ret_addr_offset();
 
-          if (!mcall->is_safepoint_node()) {
+          if (mcall->is_MachCallLeaf()) {
             is_mcall = false;
             is_sfn = false;
           }
         }
 
         // sfn will be valid whenever mcall is valid now because of inheritance
-        if( is_sfn || is_mcall ) {
+        if (is_sfn || is_mcall) {
 
           // Handle special safepoint nodes for synchronization
-          if( !is_mcall ) {
+          if (!is_mcall) {
             MachSafePointNode *sfn = mach->as_MachSafePoint();
             // !!!!! Stubs only need an oopmap right now, so bail out
-            if( sfn->jvms()->method() == NULL) {
+            if (sfn->jvms()->method() == NULL) {
               // Write the oopmap directly to the code blob??!!
 #             ifdef ENABLE_ZAP_DEAD_LOCALS
               assert( !is_node_getting_a_safepoint(sfn),  "logic does not match; false positive");
@@ -1328,31 +1377,78 @@
         }
 
         // If this is a branch, then fill in the label with the target BB's label
-        else if ( mach->is_Branch() ) {
-
-          if ( mach->ideal_Opcode() == Op_Jump ) {
-            for (uint h = 0; h < b->_num_succs; h++ ) {
-              Block* succs_block = b->_succs[h];
-              for (uint j = 1; j < succs_block->num_preds(); j++) {
-                Node* jpn = succs_block->pred(j);
-                if ( jpn->is_JumpProj() && jpn->in(0) == mach ) {
-                  uint block_num = succs_block->non_connector()->_pre_order;
-                  Label *blkLabel = &blk_labels[block_num];
-                  mach->add_case_label(jpn->as_JumpProj()->proj_no(), blkLabel);
-                }
+        else if (mach->is_MachBranch()) {
+          // This requires the TRUE branch target be in succs[0]
+          uint block_num = b->non_connector_successor(0)->_pre_order;
+
+          // Try to replace a long branch if the delay slot is not used;
+          // this mostly applies to back branches since a forward branch's
+          // distance is not updated yet.
+          bool delay_slot_is_used = valid_bundle_info(n) &&
+                                    node_bundling(n)->use_unconditional_delay();
+          if (!delay_slot_is_used && mach->may_be_short_branch()) {
+            assert(delay_slot == NULL, "not expecting delay slot node");
+            int br_size = n->size(_regalloc);
+            int offset = blk_starts[block_num] - current_offset;
+            if (block_num >= i) {
+              // The current and following blocks' offsets are not
+              // finalized yet; adjust the distance by the difference
+              // between the calculated and final offsets of the current block.
+              offset -= (blk_starts[i] - blk_offset);
+            }
+            // In the following code a nop could be inserted before
+            // the branch, which will increase the backward distance.
+            bool needs_padding = (current_offset == last_avoid_back_to_back_offset);
+            if (needs_padding && offset <= 0)
+              offset -= nop_size;
+
+            if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
+              // We've got a winner.  Replace this branch.
+              MachNode* replacement = mach->as_MachBranch()->short_branch_version(this);
+
+              // Update the jmp_size.
+              int new_size = replacement->size(_regalloc);
+              assert((br_size - new_size) >= (int)nop_size, "short_branch size should be smaller");
+              // Insert padding between avoid_back_to_back branches.
+              if (needs_padding && replacement->avoid_back_to_back()) {
+                MachNode *nop = new (this) MachNopNode();
+                b->_nodes.insert(j++, nop);
+                _cfg->_bbs.map(nop->_idx, b);
+                last_inst++;
+                nop->emit(*cb, _regalloc);
+                cb->flush_bundle(true);
+                current_offset = cb->insts_size();
+              }
+#ifdef ASSERT
+              jmp_target[i] = block_num;
+              jmp_offset[i] = current_offset - blk_offset;
+              jmp_size[i]   = new_size;
+              jmp_rule[i]   = mach->rule();
+#endif
+              b->_nodes.map(j, replacement);
+              mach->subsume_by(replacement);
+              n    = replacement;
+              mach = replacement;
+            }
+          }
+          mach->as_MachBranch()->label_set( &blk_labels[block_num], block_num );
+        } else if (mach->ideal_Opcode() == Op_Jump) {
+          for (uint h = 0; h < b->_num_succs; h++) {
+            Block* succs_block = b->_succs[h];
+            for (uint j = 1; j < succs_block->num_preds(); j++) {
+              Node* jpn = succs_block->pred(j);
+              if (jpn->is_JumpProj() && jpn->in(0) == mach) {
+                uint block_num = succs_block->non_connector()->_pre_order;
+                Label *blkLabel = &blk_labels[block_num];
+                mach->add_case_label(jpn->as_JumpProj()->proj_no(), blkLabel);
               }
             }
-          } else {
-            // For Branchs
-            // This requires the TRUE branch target be in succs[0]
-            uint block_num = b->non_connector_successor(0)->_pre_order;
-            mach->label_set( blk_labels[block_num], block_num );
           }
         }
 
 #ifdef ASSERT
         // Check that oop-store precedes the card-mark
-        else if( mach->ideal_Opcode() == Op_StoreCM ) {
+        else if (mach->ideal_Opcode() == Op_StoreCM) {
           uint storeCM_idx = j;
           int count = 0;
           for (uint prec = mach->req(); prec < mach->len(); prec++) {
@@ -1371,7 +1467,7 @@
         }
 #endif
 
-        else if( !n->is_Proj() ) {
+        else if (!n->is_Proj()) {
           // Remember the beginning of the previous instruction, in case
           // it's followed by a flag-kill and a null-check.  Happens on
           // Intel all the time, with add-to-memory kind of opcodes.
@@ -1388,13 +1484,21 @@
 
       // Save the offset for the listing
 #ifndef PRODUCT
-      if( node_offsets && n->_idx < node_offset_limit )
+      if (node_offsets && n->_idx < node_offset_limit)
         node_offsets[n->_idx] = cb->insts_size();
 #endif
 
       // "Normal" instruction case
+      DEBUG_ONLY( uint instr_offset = cb->insts_size(); )
       n->emit(*cb, _regalloc);
       current_offset  = cb->insts_size();
+
+#ifdef ASSERT
+      if (n->size(_regalloc) < (current_offset-instr_offset)) {
+        n->dump();
+        assert(false, "wrong size of mach node");
+      }
+#endif
       non_safepoints.observe_instruction(n, current_offset);
 
       // mcall is last "call" that can be a safepoint
@@ -1408,8 +1512,13 @@
         last_call_offset = current_offset;
       }
 
+      if (n->is_Mach() && n->as_Mach()->avoid_back_to_back()) {
+        // Remember the end offset of an avoid_back_to_back instruction.
+        last_avoid_back_to_back_offset = current_offset;
+      }
+
       // See if this instruction has a delay slot
-      if ( valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) {
+      if (valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) {
         assert(delay_slot != NULL, "expecting delay slot node");
 
         // Back up 1 instruction
@@ -1417,15 +1526,15 @@
 
         // Save the offset for the listing
 #ifndef PRODUCT
-        if( node_offsets && delay_slot->_idx < node_offset_limit )
+        if (node_offsets && delay_slot->_idx < node_offset_limit)
           node_offsets[delay_slot->_idx] = cb->insts_size();
 #endif
 
         // Support a SafePoint in the delay slot
-        if( delay_slot->is_MachSafePoint() ) {
+        if (delay_slot->is_MachSafePoint()) {
           MachNode *mach = delay_slot->as_Mach();
           // !!!!! Stubs only need an oopmap right now, so bail out
-          if( !mach->is_MachCall() && mach->as_MachSafePoint()->jvms()->method() == NULL ) {
+          if (!mach->is_MachCall() && mach->as_MachSafePoint()->jvms()->method() == NULL) {
             // Write the oopmap directly to the code blob??!!
 #           ifdef ENABLE_ZAP_DEAD_LOCALS
             assert( !is_node_getting_a_safepoint(mach),  "logic does not match; false positive");
@@ -1449,12 +1558,14 @@
       }
 
     } // End for all instructions in block
+    assert((uint)blk_offset <= blk_starts[i], "shouldn't increase distance");
+    blk_starts[i] = blk_offset;
 
     // If the next block is the top of a loop, pad this block out to align
     // the loop top a little. Helps prevent pipe stalls at loop back branches.
-    if( i<_cfg->_num_blocks-1 ) {
+    if (i < nblocks-1) {
       Block *nb = _cfg->_blocks[i+1];
-      uint padding = nb->alignment_padding(current_offset);
+      int padding = nb->alignment_padding(current_offset);
       if( padding > 0 ) {
         MachNode *nop = new (this) MachNopNode(padding / nop_size);
         b->_nodes.insert( b->_nodes.size(), nop );
@@ -1462,9 +1573,18 @@
         nop->emit(*cb, _regalloc);
         current_offset = cb->insts_size();
       }
+#ifdef ASSERT
+      int max_loop_pad = nb->code_alignment()-relocInfo::addr_unit();
+      block_alignment_padding = (max_loop_pad - padding);
+      assert(block_alignment_padding >= 0, "sanity");
+#endif
     }
+    // Verify that the distances of forward short branches generated
+    // earlier are still valid.
+    assert(orig_blk_size >= (current_offset - blk_offset), "shouldn't increase block size");
 
   } // End of for all blocks
+  blk_starts[nblocks] = current_offset;
 
   non_safepoints.flush_at_end();
 
@@ -1472,13 +1592,26 @@
   if (failing())  return;
 
   // Define a pseudo-label at the end of the code
-  MacroAssembler(cb).bind( blk_labels[_cfg->_num_blocks] );
+  MacroAssembler(cb).bind( blk_labels[nblocks] );
 
   // Compute the size of the first block
   _first_block_size = blk_labels[1].loc_pos() - blk_labels[0].loc_pos();
 
   assert(cb->insts_size() < 500000, "method is unreasonably large");
 
+#ifdef ASSERT
+  for (uint i = 0; i < nblocks; i++) { // For all blocks
+    if (jmp_target[i] != 0) {
+      int br_size = jmp_size[i];
+      int offset = blk_starts[jmp_target[i]]-(blk_starts[i] + jmp_offset[i]);
+      if (!_matcher->is_short_branch_offset(jmp_rule[i], br_size, offset)) {
+        tty->print_cr("target (%d) - jmp_offset(%d) = offset (%d), jump_size(%d), jmp_block B%d, target_block B%d", blk_starts[jmp_target[i]], blk_starts[i] + jmp_offset[i], offset, br_size, i, jmp_target[i]);
+        assert(false, "Displacement too large for short jmp");
+      }
+    }
+  }
+#endif
+
   // ------------------
 
 #ifndef PRODUCT
@@ -1565,8 +1698,8 @@
       uint call_return = call_returns[b->_pre_order];
 #ifdef ASSERT
       assert( call_return > 0, "no call seen for this basic block" );
-      while( b->_nodes[--j]->Opcode() == Op_MachProj ) ;
-      assert( b->_nodes[j]->is_Call(), "CatchProj must follow call" );
+      while( b->_nodes[--j]->is_MachProj() ) ;
+      assert( b->_nodes[j]->is_MachCall(), "CatchProj must follow call" );
 #endif
       // last instruction is a CatchNode, find it's CatchProjNodes
       int nof_succs = b->_num_succs;
@@ -1743,11 +1876,6 @@
   // Create a data structure for all the scheduling information
   Scheduling scheduling(Thread::current()->resource_area(), *this);
 
-  // Initialize the space for the BufferBlob used to find and verify
-  // instruction size in MachNode::emit_size()
-  init_scratch_buffer_blob(MAX_const_size);
-  if (failing())  return;  // Out of memory
-
   // Walk backwards over each basic block, computing the needed alignment
   // Walk over all the basic blocks
   scheduling.DoScheduling();
@@ -2028,7 +2156,7 @@
     // the first instruction at the branch target is
     // copied to the delay slot, and the branch goes to
     // the instruction after that at the branch target
-    if ( n->is_Mach() && n->is_Branch() ) {
+    if ( n->is_MachBranch() ) {
 
       assert( !n->is_MachNullCheck(), "should not look for delay slot for Null Check" );
       assert( !n->is_Catch(),         "should not look for delay slot for Catch" );
@@ -2346,12 +2474,18 @@
     // have their delay slots filled in the template expansions, so we don't
     // bother scheduling them.
     Node *last = bb->_nodes[_bb_end];
+    // Ignore trailing NOPs.
+    while (_bb_end > 0 && last->is_Mach() &&
+           last->as_Mach()->ideal_Opcode() == Op_Con) {
+      last = bb->_nodes[--_bb_end];
+    }
+    assert(!last->is_Mach() || last->as_Mach()->ideal_Opcode() != Op_Con, "");
     if( last->is_Catch() ||
        // Exclude unreachable path case when Halt node is in a separate block.
        (_bb_end > 1 && last->is_Mach() && last->as_Mach()->ideal_Opcode() == Op_Halt) ) {
       // There must be a prior call.  Skip it.
-      while( !bb->_nodes[--_bb_end]->is_Call() ) {
-        assert( bb->_nodes[_bb_end]->is_Proj(), "skipping projections after expected call" );
+      while( !bb->_nodes[--_bb_end]->is_MachCall() ) {
+        assert( bb->_nodes[_bb_end]->is_MachProj(), "skipping projections after expected call" );
       }
     } else if( last->is_MachNullCheck() ) {
       // Backup so the last null-checked memory instruction is
@@ -2663,7 +2797,7 @@
   for( uint i = _bb_end-1; i >= _bb_start; i-- ) {
     Node *n = b->_nodes[i];
     int is_def = n->outcnt();   // def if some uses prior to adding precedence edges
-    if( n->Opcode() == Op_MachProj && n->ideal_reg() == MachProjNode::fat_proj ) {
+    if( n->is_MachProj() && n->ideal_reg() == MachProjNode::fat_proj ) {
       // Fat-proj kills a slew of registers
       // This can add edges to 'n' and obscure whether or not it was a def,
       // hence the is_def flag.
@@ -2680,12 +2814,29 @@
       anti_do_def( b, n, _regalloc->get_reg_second(n), is_def );
     }
 
+    // Kill projections on a branch should appear to occur on the
+    // branch, not afterwards, so grab the masks from the projections
+    // and process them.
+    if (n->is_MachBranch() || (n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_Jump)) {
+      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+        Node* use = n->fast_out(i);
+        if (use->is_Proj()) {
+          RegMask rm = use->out_RegMask();// Make local copy
+          while( rm.is_NotEmpty() ) {
+            OptoReg::Name kill = rm.find_first_elem();
+            rm.Remove(kill);
+            anti_do_def( b, n, kill, false );
+          }
+        }
+      }
+    }
+
     // Check each register used by this instruction for a following DEF/KILL
     // that must occur afterward and requires an anti-dependence edge.
     for( uint j=0; j<n->req(); j++ ) {
       Node *def = n->in(j);
       if( def ) {
-        assert( def->Opcode() != Op_MachProj || def->ideal_reg() != MachProjNode::fat_proj, "" );
+        assert( !def->is_MachProj() || def->ideal_reg() != MachProjNode::fat_proj, "" );
         anti_do_use( b, n, _regalloc->get_reg_first(def) );
         anti_do_use( b, n, _regalloc->get_reg_second(def) );
       }
--- a/src/share/vm/opto/parse.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/parse.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -41,6 +41,8 @@
 
 //------------------------------InlineTree-------------------------------------
 class InlineTree : public ResourceObj {
+  friend class VMStructs;
+
   Compile*    C;                  // cache
   JVMState*   _caller_jvms;       // state of caller
   ciMethod*   _method;            // method being called by the caller_jvms
@@ -54,7 +56,8 @@
   float compute_callee_frequency( int caller_bci ) const;
 
   GrowableArray<InlineTree*> _subtrees;
-  friend class Compile;
+
+  void print_impl(outputStream* st, int indent) const PRODUCT_RETURN;
 
 protected:
   InlineTree(Compile* C,
@@ -78,6 +81,8 @@
   int         stack_depth()       const { return _caller_jvms ? _caller_jvms->depth() : 0; }
 
 public:
+  static const char* check_can_parse(ciMethod* callee);
+
   static InlineTree* build_inline_tree_root();
   static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee, bool create_if_not_found = false);
 
@@ -119,6 +124,8 @@
   uint        count_inlines()     const { return _count_inlines; };
 #endif
   GrowableArray<InlineTree*> subtrees() { return _subtrees; }
+
+  void print_value_on(outputStream* st) const PRODUCT_RETURN;
 };
 
 
--- a/src/share/vm/opto/parse2.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/parse2.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -752,20 +752,12 @@
 // Handle ret bytecode
 void Parse::do_ret() {
   // Find to whom we return.
-#if 0 // %%%% MAKE THIS WORK
-  Node* con = local();
-  const TypePtr* tp = con->bottom_type()->isa_ptr();
-  assert(tp && tp->singleton(), "");
-  int return_bci = (int) tp->get_con();
-  merge(return_bci);
-#else
   assert(block()->num_successors() == 1, "a ret can only go one place now");
   Block* target = block()->successor_at(0);
   assert(!target->is_ready(), "our arrival must be expected");
   profile_ret(target->flow()->start());
   int pnum = target->next_path_num();
   merge_common(target, pnum);
-#endif
 }
 
 //--------------------------dynamic_branch_prediction--------------------------
--- a/src/share/vm/opto/parse3.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/parse3.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -100,6 +100,14 @@
     }
   }
 
+  // Deoptimize on putfield writes to call site target field.
+  if (!is_get && field->is_call_site_target()) {
+    uncommon_trap(Deoptimization::Reason_unhandled,
+                  Deoptimization::Action_reinterpret,
+                  NULL, "put to call site target field");
+    return;
+  }
+
   assert(field->will_link(method()->holder(), bc()), "getfield: typeflow responsibility");
 
   // Note:  We do not check for an unloaded field type here any more.
@@ -139,19 +147,21 @@
 void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
   // Does this field have a constant value?  If so, just push the value.
   if (field->is_constant()) {
+    // final field
     if (field->is_static()) {
       // final static field
       if (push_constant(field->constant_value()))
         return;
     }
     else {
-      // final non-static field of a trusted class (classes in
-      // java.lang.invoke and sun.invoke packages and subpackages).
+      // final non-static field
+      // Treat final non-static fields of trusted classes (classes in
+      // java.lang.invoke and sun.invoke packages and subpackages) as
+      // compile time constants.
       if (obj->is_Con()) {
         const TypeOopPtr* oop_ptr = obj->bottom_type()->isa_oopptr();
         ciObject* constant_oop = oop_ptr->const_oop();
         ciConstant constant = field->constant_value_of(constant_oop);
-
         if (push_constant(constant, true))
           return;
       }
@@ -417,17 +427,10 @@
 
   // Note:  Array classes are always initialized; no is_initialized check.
 
-  enum { MAX_DIMENSION = 5 };
-  if (ndimensions > MAX_DIMENSION || ndimensions <= 0) {
-    uncommon_trap(Deoptimization::Reason_unhandled,
-                  Deoptimization::Action_none);
-    return;
-  }
-
   kill_dead_locals();
 
   // get the lengths from the stack (first dimension is on top)
-  Node* length[MAX_DIMENSION+1];
+  Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
   length[ndimensions] = NULL;  // terminating null for make_runtime_call
   int j;
   for (j = ndimensions-1; j >= 0 ; j--) length[j] = pop();
@@ -470,20 +473,43 @@
 
   address fun = NULL;
   switch (ndimensions) {
-  //case 1: Actually, there is no case 1.  It's handled by new_array.
+  case 1: ShouldNotReachHere(); break;
   case 2: fun = OptoRuntime::multianewarray2_Java(); break;
   case 3: fun = OptoRuntime::multianewarray3_Java(); break;
   case 4: fun = OptoRuntime::multianewarray4_Java(); break;
   case 5: fun = OptoRuntime::multianewarray5_Java(); break;
-  default: ShouldNotReachHere();
   };
+  Node* c = NULL;
 
-  Node* c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
-                              OptoRuntime::multianewarray_Type(ndimensions),
-                              fun, NULL, TypeRawPtr::BOTTOM,
-                              makecon(TypeKlassPtr::make(array_klass)),
-                              length[0], length[1], length[2],
-                              length[3], length[4]);
+  if (fun != NULL) {
+    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
+                          OptoRuntime::multianewarray_Type(ndimensions),
+                          fun, NULL, TypeRawPtr::BOTTOM,
+                          makecon(TypeKlassPtr::make(array_klass)),
+                          length[0], length[1], length[2],
+                          length[3], length[4]);
+  } else {
+    // Create a java array for dimension sizes
+    Node* dims = NULL;
+    { PreserveReexecuteState preexecs(this);
+      _sp += ndimensions;
+      Node* dims_array_klass = makecon(TypeKlassPtr::make(ciArrayKlass::make(ciType::make(T_INT))));
+      dims = new_array(dims_array_klass, intcon(ndimensions), 0);
+
+      // Fill-in it with values
+      for (j = 0; j < ndimensions; j++) {
+        Node *dims_elem = array_element_address(dims, intcon(j), T_INT);
+        store_to_memory(control(), dims_elem, length[j], T_INT, TypeAryPtr::INTS);
+      }
+    }
+
+    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
+                          OptoRuntime::multianewarrayN_Type(),
+                          OptoRuntime::multianewarrayN_Java(), NULL, TypeRawPtr::BOTTOM,
+                          makecon(TypeKlassPtr::make(array_klass)),
+                          dims);
+  }
+
   Node* res = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms));
 
   const Type* type = TypeOopPtr::make_from_klass_raw(array_klass);
@@ -496,7 +522,7 @@
   if (ltype != NULL)
     type = type->is_aryptr()->cast_to_size(ltype);
 
-  // We cannot sharpen the nested sub-arrays, since the top level is mutable.
+    // We cannot sharpen the nested sub-arrays, since the top level is mutable.
 
   Node* cast = _gvn.transform( new (C, 2) CheckCastPPNode(control(), res, type) );
   push(cast);
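
The parse3.cpp hunk above drops the old MAX_DIMENSION limit by collecting all dimension lengths into a single int array and calling one generic multianewarrayN runtime entry instead of the fixed-arity multianewarray2..5 helpers. The sketch below illustrates the shape of that idea in plain C++; ArrayObj and multi_allocate are hypothetical stand-ins, not the HotSpot runtime API.

// One allocation entry point for any dimension count, driven by a dims array.
#include <cstdio>
#include <vector>

struct ArrayObj {                        // stand-in for a Java array oop
  std::vector<ArrayObj*> elems;          // sub-arrays (empty at the innermost level)
  int length;
};

ArrayObj* multi_allocate(const int* dims, int ndims) {
  ArrayObj* a = new ArrayObj{{}, dims[0]};
  if (ndims > 1) {
    a->elems.reserve(dims[0]);
    for (int i = 0; i < dims[0]; i++)     // allocate the next dimension recursively
      a->elems.push_back(multi_allocate(dims + 1, ndims - 1));
  }
  return a;
}

int main() {
  int dims[] = {2, 3, 4, 5, 6, 7};        // 6 dimensions: beyond the old limit of 5
  ArrayObj* arr = multi_allocate(dims, 6);
  std::printf("outer length = %d, first sub-array length = %d\n",
              arr->length, arr->elems[0]->length);
  return 0;                               // leak deliberately ignored in this sketch
}
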
--- a/src/share/vm/opto/postaloc.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/postaloc.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -72,7 +72,22 @@
   return i == limit;
 }
 
-
+//------------------------------yank-----------------------------------
+// Helper function for yank_if_dead
+int PhaseChaitin::yank( Node *old, Block *current_block, Node_List *value, Node_List *regnd ) {
+  int blk_adjust=0;
+  Block *oldb = _cfg._bbs[old->_idx];
+  oldb->find_remove(old);
+  // Count 1 if deleting an instruction from the current block
+  if( oldb == current_block ) blk_adjust++;
+  _cfg._bbs.map(old->_idx,NULL);
+  OptoReg::Name old_reg = lrgs(n2lidx(old)).reg();
+  if( regnd && (*regnd)[old_reg]==old ) { // Instruction is currently available?
+    value->map(old_reg,NULL);  // Yank from value/regnd maps
+    regnd->map(old_reg,NULL);  // This register's value is now unknown
+  }
+  return blk_adjust;
+}
 
 //------------------------------yank_if_dead-----------------------------------
 // Removed an edge from 'old'.  Yank if dead.  Return adjustment counts to
@@ -80,18 +95,23 @@
 int PhaseChaitin::yank_if_dead( Node *old, Block *current_block, Node_List *value, Node_List *regnd ) {
   int blk_adjust=0;
   while (old->outcnt() == 0 && old != C->top()) {
-    Block *oldb = _cfg._bbs[old->_idx];
-    oldb->find_remove(old);
-    // Count 1 if deleting an instruction from the current block
-    if( oldb == current_block ) blk_adjust++;
-    _cfg._bbs.map(old->_idx,NULL);
-    OptoReg::Name old_reg = lrgs(n2lidx(old)).reg();
-    if( regnd && (*regnd)[old_reg]==old ) { // Instruction is currently available?
-      value->map(old_reg,NULL);  // Yank from value/regnd maps
-      regnd->map(old_reg,NULL);  // This register's value is now unknown
+    blk_adjust += yank(old, current_block, value, regnd);
+
+    Node *tmp = NULL;
+    for (uint i = 1; i < old->req(); i++) {
+      if (old->in(i)->is_MachTemp()) {
+        // handle TEMP inputs
+        Node* machtmp = old->in(i);
+        if (machtmp->outcnt() == 1) {
+          assert(machtmp->unique_out() == old, "sanity");
+          blk_adjust += yank(machtmp, current_block, value, regnd);
+          machtmp->disconnect_inputs(NULL);
+        }
+      } else {
+        assert(tmp == NULL, "can't handle more non MachTemp inputs");
+        tmp = old->in(i);
+      }
     }
-    assert(old->req() <= 2, "can't handle more inputs");
-    Node *tmp = old->req() > 1 ? old->in(1) : NULL;
     old->disconnect_inputs(NULL);
     if( !tmp ) break;
     old = tmp;
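
The postaloc.cpp change above factors the node-removal bookkeeping into a yank() helper so that yank_if_dead can also delete MachTemp inputs whose only use was the node being removed. The following sketch shows that cascading-removal pattern in isolation, using made-up Node/outcnt types rather than HotSpot's.

// Cascading dead-node removal: a dying node takes its now-dead inputs with it.
#include <cstdio>
#include <string>
#include <vector>

struct Node {
  std::string name;
  bool is_temp;                 // stands in for MachTemp
  std::vector<Node*> inputs;
  int outcnt;                   // number of remaining uses
};

void yank_if_dead(Node* n, std::vector<std::string>& removed) {
  while (n != nullptr && n->outcnt == 0) {
    removed.push_back(n->name);                // "yank" the dead node
    Node* follow = nullptr;
    for (Node* in : n->inputs) {
      --in->outcnt;                            // this use just disappeared
      if (in->is_temp) {
        if (in->outcnt == 0) removed.push_back(in->name);  // temp dies with n
      } else if (follow == nullptr) {
        follow = in;                           // chase at most one ordinary input
      }
    }
    n->inputs.clear();
    n = follow;                                // continue if it became dead too
  }
}

int main() {
  Node def{"def", false, {}, 1};
  Node temp{"temp", true, {}, 1};
  Node use{"use", false, {&def, &temp}, 0};    // 'use' just lost its last consumer
  std::vector<std::string> removed;
  yank_if_dead(&use, removed);
  for (const std::string& s : removed) std::printf("removed %s\n", s.c_str());
  return 0;
}
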
--- a/src/share/vm/opto/reg_split.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/reg_split.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -984,7 +984,7 @@
               continue;
             }
 
-            if (UseFPUForSpilling && n->is_Call() && !uup && !dup ) {
+            if (UseFPUForSpilling && n->is_MachCall() && !uup && !dup ) {
               // The use at the call can force the def down so insert
               // a split before the use to allow the def more freedom.
               maxlrg = split_USE(def,b,n,inpidx,maxlrg,dup,false, splits,slidx);
--- a/src/share/vm/opto/regalloc.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/regalloc.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,6 +39,7 @@
 //------------------------------PhaseRegAlloc------------------------------------
 // Abstract register allocator
 class PhaseRegAlloc : public Phase {
+  friend class VMStructs;
   static void (*_alloc_statistics[MAX_REG_ALLOCATORS])();
   static int _num_allocators;
 
--- a/src/share/vm/opto/runtime.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/runtime.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -106,6 +106,7 @@
 address OptoRuntime::_multianewarray3_Java                        = NULL;
 address OptoRuntime::_multianewarray4_Java                        = NULL;
 address OptoRuntime::_multianewarray5_Java                        = NULL;
+address OptoRuntime::_multianewarrayN_Java                        = NULL;
 address OptoRuntime::_g1_wb_pre_Java                              = NULL;
 address OptoRuntime::_g1_wb_post_Java                             = NULL;
 address OptoRuntime::_vtable_must_compile_Java                    = NULL;
@@ -154,6 +155,7 @@
   gen(env, _multianewarray3_Java           , multianewarray3_Type         , multianewarray3_C               ,    0 , true , false, false);
   gen(env, _multianewarray4_Java           , multianewarray4_Type         , multianewarray4_C               ,    0 , true , false, false);
   gen(env, _multianewarray5_Java           , multianewarray5_Type         , multianewarray5_C               ,    0 , true , false, false);
+  gen(env, _multianewarrayN_Java           , multianewarrayN_Type         , multianewarrayN_C               ,    0 , true , false, false);
   gen(env, _g1_wb_pre_Java                 , g1_wb_pre_Type               , SharedRuntime::g1_wb_pre        ,    0 , false, false, false);
   gen(env, _g1_wb_post_Java                , g1_wb_post_Type              , SharedRuntime::g1_wb_post       ,    0 , false, false, false);
   gen(env, _complete_monitor_locking_Java  , complete_monitor_enter_Type  , SharedRuntime::complete_monitor_locking_C      ,    0 , false, false, false);
@@ -374,6 +376,24 @@
   thread->set_vm_result(obj);
 JRT_END
 
+JRT_ENTRY(void, OptoRuntime::multianewarrayN_C(klassOopDesc* elem_type, arrayOopDesc* dims, JavaThread *thread))
+  assert(check_compiled_frame(thread), "incorrect caller");
+  assert(oop(elem_type)->is_klass(), "not a class");
+  assert(oop(dims)->is_typeArray(), "not an array");
+
+  ResourceMark rm;
+  jint len = dims->length();
+  assert(len > 0, "Dimensions array should contain data");
+  jint *j_dims = typeArrayOop(dims)->int_at_addr(0);
+  jint *c_dims = NEW_RESOURCE_ARRAY(jint, len);
+  Copy::conjoint_jints_atomic(j_dims, c_dims, len);
+
+  oop obj = arrayKlass::cast(elem_type)->multi_allocate(len, c_dims, THREAD);
+  deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
+  thread->set_vm_result(obj);
+JRT_END
+
+
 const TypeFunc *OptoRuntime::new_instance_Type() {
   // create input type (domain)
   const Type **fields = TypeTuple::fields(1);
@@ -454,6 +474,21 @@
   return multianewarray_Type(5);
 }
 
+const TypeFunc *OptoRuntime::multianewarrayN_Type() {
+  // create input type (domain)
+  const Type **fields = TypeTuple::fields(2);
+  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;   // element klass
+  fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL;   // array of dim sizes
+  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
+
+  // create result type (range)
+  fields = TypeTuple::fields(1);
+  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
+  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
+
+  return TypeFunc::make(domain, range);
+}
+
 const TypeFunc *OptoRuntime::g1_wb_pre_Type() {
   const Type **fields = TypeTuple::fields(2);
   fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
@@ -943,7 +978,6 @@
 
     thread->set_exception_pc(pc);
     thread->set_exception_handler_pc(handler_address);
-    thread->set_exception_stack_size(0);
 
     // Check if the exception PC is a MethodHandle call site.
     thread->set_is_method_handle_return(nm->is_method_handle_return(pc));
--- a/src/share/vm/opto/runtime.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/runtime.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -118,6 +118,7 @@
   static address _multianewarray3_Java;
   static address _multianewarray4_Java;
   static address _multianewarray5_Java;
+  static address _multianewarrayN_Java;
   static address _g1_wb_pre_Java;
   static address _g1_wb_post_Java;
   static address _vtable_must_compile_Java;
@@ -153,6 +154,7 @@
   static void multianewarray3_C(klassOopDesc* klass, int len1, int len2, int len3, JavaThread *thread);
   static void multianewarray4_C(klassOopDesc* klass, int len1, int len2, int len3, int len4, JavaThread *thread);
   static void multianewarray5_C(klassOopDesc* klass, int len1, int len2, int len3, int len4, int len5, JavaThread *thread);
+  static void multianewarrayN_C(klassOopDesc* klass, arrayOopDesc* dims, JavaThread *thread);
   static void g1_wb_pre_C(oopDesc* orig, JavaThread* thread);
   static void g1_wb_post_C(void* card_addr, JavaThread* thread);
 
@@ -210,6 +212,7 @@
   static address multianewarray3_Java()                  { return _multianewarray3_Java; }
   static address multianewarray4_Java()                  { return _multianewarray4_Java; }
   static address multianewarray5_Java()                  { return _multianewarray5_Java; }
+  static address multianewarrayN_Java()                  { return _multianewarrayN_Java; }
   static address g1_wb_pre_Java()                        { return _g1_wb_pre_Java; }
   static address g1_wb_post_Java()                       { return _g1_wb_post_Java; }
   static address vtable_must_compile_stub()              { return _vtable_must_compile_Java; }
@@ -249,6 +252,7 @@
   static const TypeFunc* multianewarray3_Type(); // multianewarray
   static const TypeFunc* multianewarray4_Type(); // multianewarray
   static const TypeFunc* multianewarray5_Type(); // multianewarray
+  static const TypeFunc* multianewarrayN_Type(); // multianewarray
   static const TypeFunc* g1_wb_pre_Type();
   static const TypeFunc* g1_wb_post_Type();
   static const TypeFunc* complete_monitor_enter_Type();
--- a/src/share/vm/opto/split_if.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/split_if.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -500,19 +500,14 @@
   region_cache.lru_insert( new_false, new_false );
   region_cache.lru_insert( new_true , new_true  );
   // Now handle all uses of the splitting block
-  for (DUIterator_Last kmin, k = region->last_outs(kmin); k >= kmin; --k) {
-    Node* phi = region->last_out(k);
-    if( !phi->in(0) ) {         // Dead phi?  Remove it
+  for (DUIterator k = region->outs(); region->has_out(k); k++) {
+    Node* phi = region->out(k);
+    if (!phi->in(0)) {         // Dead phi?  Remove it
       _igvn.remove_dead_node(phi);
-      continue;
-    }
-    assert( phi->in(0) == region, "" );
-    if( phi == region ) {       // Found the self-reference
-      phi->set_req(0, NULL);
-      continue;                 // Break the self-cycle
-    }
-    // Expected common case: Phi hanging off of Region
-    if( phi->is_Phi() ) {
+    } else if (phi == region) { // Found the self-reference
+      continue;                 // No roll-back of DUIterator
+    } else if (phi->is_Phi()) { // Expected common case: Phi hanging off of Region
+      assert(phi->in(0) == region, "Inconsistent graph");
       // Need a per-def cache.  Phi represents a def, so make a cache
       small_cache phi_cache;
 
@@ -524,22 +519,24 @@
         // collection of PHI's merging values from different paths.  The Phis
         // inserted depend only on the location of the USE.  We use a
         // 2-element cache to handle multiple uses from the same block.
-        handle_use( use, phi, &phi_cache, region_dom, new_false, new_true, old_false, old_true );
+        handle_use(use, phi, &phi_cache, region_dom, new_false, new_true, old_false, old_true);
       } // End of while phi has uses
-
-      // Because handle_use might relocate region->_out,
-      // we must refresh the iterator.
-      k = region->last_outs(kmin);
-
       // Remove the dead Phi
       _igvn.remove_dead_node( phi );
-
     } else {
+      assert(phi->in(0) == region, "Inconsistent graph");
       // Random memory op guarded by Region.  Compute new DEF for USE.
-      handle_use( phi, region, &region_cache, region_dom, new_false, new_true, old_false, old_true );
+      handle_use(phi, region, &region_cache, region_dom, new_false, new_true, old_false, old_true);
     }
+    // Every path above deletes a use of the region, except for the region
+    // self-cycle (which is needed by handle_use calling find_use_block
+    // calling get_ctrl calling get_ctrl_no_update looking for dead
+    // regions).  So roll back the DUIterator innards.
+    --k;
+  } // End of while merge point has phis
 
-  } // End of while merge point has phis
+  assert(region->outcnt() == 1, "Only self reference should remain"); // Just Self on the Region
+  region->set_req(0, NULL);       // Break the self-cycle
 
   // Any leftover bits in the splitting block must not have depended on local
   // Phi inputs (these have already been split-up).  Hence it's safe to hoist
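
The split_if.cpp hunk above switches from a backwards DUIterator_Last walk to a forward DUIterator walk that deletes uses as it goes and compensates with --k, leaving only the region self-cycle to break after the loop. A tiny, generic illustration of that delete-and-roll-back iteration pattern (plain std::vector, not DUIterator):

// Remove elements while walking forward, stepping the index back after each erase.
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> uses = {3, -1, 4, -2, 5};   // negative values stand for dead uses
  for (int k = 0; k < (int)uses.size(); k++) {
    if (uses[k] < 0) {
      uses.erase(uses.begin() + k);            // delete the dead use ...
      --k;                                     // ... and roll the index back so the
    }                                          //     element that slid in is visited
  }
  for (int u : uses) std::printf("%d ", u);    // prints: 3 4 5
  std::printf("\n");
  return 0;
}
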
--- a/src/share/vm/opto/superword.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/superword.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -70,6 +70,8 @@
   assert(lpt->_head->is_CountedLoop(), "must be");
   CountedLoopNode *cl = lpt->_head->as_CountedLoop();
 
+  if (!cl->is_valid_counted_loop()) return; // skip malformed counted loop
+
   if (!cl->is_main_loop() ) return; // skip normal, pre, and post loops
 
   // Check for no control flow in body (other than exit)
@@ -1167,7 +1169,7 @@
 
       } else if (n->is_Store()) {
         // Promote value to be stored to vector
-        VectorNode* val = vector_opd(p, MemNode::ValueIn);
+        Node* val = vector_opd(p, MemNode::ValueIn);
 
         int   opc = n->Opcode();
         Node* ctl = n->in(MemNode::Control);
@@ -1199,7 +1201,7 @@
 
 //------------------------------vector_opd---------------------------
 // Create a vector operand for the nodes in pack p for operand: in(opd_idx)
-VectorNode* SuperWord::vector_opd(Node_List* p, int opd_idx) {
+Node* SuperWord::vector_opd(Node_List* p, int opd_idx) {
   Node* p0 = p->at(0);
   uint vlen = p->size();
   Node* opd = p0->in(opd_idx);
@@ -1215,9 +1217,10 @@
   }
 
   if (same_opd) {
-    if (opd->is_Vector()) {
-      return (VectorNode*)opd; // input is matching vector
+    if (opd->is_Vector() || opd->is_VectorLoad()) {
+      return opd; // input is matching vector
     }
+    assert(!opd->is_VectorStore(), "such vector is not expected here");
     // Convert scalar input to vector. Use p0's type because it's container
     // maybe smaller than the operand's container.
     const Type* opd_t = velt_type(!in_bb(opd) ? p0 : opd);
--- a/src/share/vm/opto/superword.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/superword.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -360,7 +360,7 @@
   // Convert packs into vector node operations
   void output();
   // Create a vector operand for the nodes in pack p for operand: in(opd_idx)
-  VectorNode* vector_opd(Node_List* p, int opd_idx);
+  Node* vector_opd(Node_List* p, int opd_idx);
   // Can code be generated for pack p?
   bool implemented(Node_List* p);
   // For pack p, are all operands and all uses (with in the block) vector?
--- a/src/share/vm/opto/type.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/type.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -64,6 +64,8 @@
 // different kind of Type exists.  Types are never modified after creation, so
 // all their interesting fields are constant.
 class Type {
+  friend class VMStructs;
+
 public:
   enum TYPES {
     Bad=0,                      // Type check
--- a/src/share/vm/opto/vectornode.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/vectornode.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -426,7 +426,7 @@
 
 // Return the vector version of a scalar store node.
 VectorStoreNode* VectorStoreNode::make(Compile* C, int opc, Node* ctl, Node* mem,
-                                       Node* adr, const TypePtr* atyp, VectorNode* val,
+                                       Node* adr, const TypePtr* atyp, Node* val,
                                        uint vlen) {
   int vopc = opcode(opc, vlen);
 
--- a/src/share/vm/opto/vectornode.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/opto/vectornode.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -47,10 +47,10 @@
   friend class VectorStoreNode; // ditto.
 
   VectorNode(Node* n1, uint vlen) : Node(NULL, n1), _length(vlen) {
-    init_flags(Flag_is_Vector);
+    init_class_id(Class_Vector);
   }
   VectorNode(Node* n1, Node* n2, uint vlen) : Node(NULL, n1, n2), _length(vlen) {
-    init_flags(Flag_is_Vector);
+    init_class_id(Class_Vector);
   }
   virtual int Opcode() const;
 
@@ -389,7 +389,7 @@
  public:
   VectorLoadNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const Type *rt)
     : LoadNode(c,mem,adr,at,rt) {
-      init_flags(Flag_is_Vector);
+    init_class_id(Class_VectorLoad);
   }
   virtual int Opcode() const;
 
@@ -617,7 +617,7 @@
  public:
   VectorStoreNode(Node* c, Node* mem, Node* adr, const TypePtr* at, Node* val)
     : StoreNode(c,mem,adr,at,val) {
-      init_flags(Flag_is_Vector);
+    init_class_id(Class_VectorStore);
   }
   virtual int Opcode() const;
 
@@ -635,7 +635,7 @@
   static int opcode(int sopc, uint vlen);
 
   static VectorStoreNode* make(Compile* C, int opc, Node* ctl, Node* mem,
-                               Node* adr, const TypePtr* atyp, VectorNode* val,
+                               Node* adr, const TypePtr* atyp, Node* val,
                                uint vlen);
 };
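
A minimal sketch (not part of the patch) of the effect of switching the constructors from init_flags(Flag_is_Vector) to init_class_id(...): the three vector node families become distinguishable through the standard is_<Class>() queries, which is what SuperWord::vector_opd() above relies on.

  // Sketch only; n is any ideal-graph Node*.
  if (n->is_Vector())      { /* vector arithmetic node              */ }
  if (n->is_VectorLoad())  { /* vector load  (a LoadNode subclass)  */ }
  if (n->is_VectorStore()) { /* vector store (a StoreNode subclass) */ }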
 
--- a/src/share/vm/precompiled.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/precompiled.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -206,7 +206,6 @@
 # include "runtime/perfMemory.hpp"
 # include "runtime/prefetch.hpp"
 # include "runtime/reflection.hpp"
-# include "runtime/reflectionCompat.hpp"
 # include "runtime/reflectionUtils.hpp"
 # include "runtime/registerMap.hpp"
 # include "runtime/safepoint.hpp"
--- a/src/share/vm/prims/forte.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/prims/forte.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -522,25 +522,6 @@
 extern "C" {
 JNIEXPORT
 void AsyncGetCallTrace(ASGCT_CallTrace *trace, jint depth, void* ucontext) {
-
-// This is if'd out because we no longer use thread suspension.
-// However if someone wanted to backport this to a 5.0 jvm then this
-// code would be important.
-#if 0
-  if (SafepointSynchronize::is_synchronizing()) {
-    // The safepoint mechanism is trying to synchronize all the threads.
-    // Since this can involve thread suspension, it is not safe for us
-    // to be here. We can reduce the deadlock risk window by quickly
-    // returning to the SIGPROF handler. However, it is still possible
-    // for VMThread to catch us here or in the SIGPROF handler. If we
-    // are suspended while holding a resource and another thread blocks
-    // on that resource in the SIGPROF handler, then we will have a
-    // three-thread deadlock (VMThread, this thread, the other thread).
-    trace->num_frames = ticks_safepoint; // -10
-    return;
-  }
-#endif
-
   JavaThread* thread;
 
   if (trace->env_id == NULL ||
--- a/src/share/vm/prims/jni.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/prims/jni.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -70,15 +70,6 @@
 #include "utilities/dtrace.hpp"
 #include "utilities/events.hpp"
 #include "utilities/histogram.hpp"
-#ifdef TARGET_ARCH_x86
-# include "jniTypes_x86.hpp"
-#endif
-#ifdef TARGET_ARCH_sparc
-# include "jniTypes_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_zero
-# include "jniTypes_zero.hpp"
-#endif
 #ifdef TARGET_OS_FAMILY_linux
 # include "os_linux.inline.hpp"
 # include "thread_linux.inline.hpp"
@@ -501,7 +492,7 @@
 
   // First check if this is a static field
   if (modifiers & JVM_ACC_STATIC) {
-    intptr_t offset = instanceKlass::cast(k1())->offset_from_fields( slot );
+    intptr_t offset = instanceKlass::cast(k1())->field_offset( slot );
     JNIid* id = instanceKlass::cast(k1())->jni_id_for(offset);
     assert(id != NULL, "corrupt Field object");
     debug_only(id->set_is_static_field_id();)
@@ -513,7 +504,7 @@
   // The slot is the index of the field description in the field-array
   // The jfieldID is the offset of the field within the object
   // It may also have hash bits for k, if VerifyJNIFields is turned on.
-  intptr_t offset = instanceKlass::cast(k1())->offset_from_fields( slot );
+  intptr_t offset = instanceKlass::cast(k1())->field_offset( slot );
   assert(instanceKlass::cast(k1())->contains_field_offset(offset), "stay within object");
   ret = jfieldIDWorkaround::to_instance_jfieldID(k1(), offset);
   return ret;
@@ -3296,6 +3287,19 @@
   return ret;
 }
 
+#ifndef PRODUCT
+
+#include "utilities/quickSort.hpp"
+
+void execute_internal_vm_tests() {
+  if (ExecuteInternalVMTests) {
+    assert(QuickSort::test_quick_sort(), "test_quick_sort failed");
+    tty->print_cr("All tests passed");
+  }
+}
+
+#endif
+
 HS_DTRACE_PROBE_DECL3(hotspot_jni, CreateJavaVM__entry, vm, penv, args);
 DT_RETURN_MARK_DECL(CreateJavaVM, jint);
 
@@ -3386,6 +3390,7 @@
   }
 
   NOT_PRODUCT(test_error_handler(ErrorHandlerTest));
+  NOT_PRODUCT(execute_internal_vm_tests());
   return result;
 }
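
A minimal sketch (not part of the patch) of how the new hook is used: in a non-product build, starting the VM with the ExecuteInternalVMTests flag runs the registered self-tests right after JNI_CreateJavaVM completes, and adding a test is one more assert in the hook. Foo::test_foo() below is hypothetical.

  // Sketch only (Foo::test_foo() is a hypothetical extra self-test).
  void execute_internal_vm_tests() {
    if (ExecuteInternalVMTests) {
      assert(QuickSort::test_quick_sort(), "test_quick_sort failed");
      assert(Foo::test_foo(),              "test_foo failed");
      tty->print_cr("All tests passed");
    }
  }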
 
--- a/src/share/vm/prims/jvm.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/prims/jvm.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -32,6 +32,7 @@
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/universe.inline.hpp"
+#include "oops/fieldStreams.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "prims/jvm.h"
@@ -1493,7 +1494,7 @@
 
   fieldDescriptor fd;
   KlassHandle kh(THREAD, k);
-  intptr_t offset = instanceKlass::cast(kh())->offset_from_fields(slot);
+  intptr_t offset = instanceKlass::cast(kh())->field_offset(slot);
 
   if (modifiers & JVM_ACC_STATIC) {
     // for static fields we only look in the current class
@@ -1593,9 +1594,6 @@
   // Ensure class is linked
   k->link_class(CHECK_NULL);
 
-  typeArrayHandle fields(THREAD, k->fields());
-  int fields_len = fields->length();
-
   // 4496456 We need to filter out java.lang.Throwable.backtrace
   bool skip_backtrace = false;
 
@@ -1604,12 +1602,11 @@
 
   if (publicOnly) {
     num_fields = 0;
-    for (int i = 0, j = 0; i < fields_len; i += instanceKlass::next_offset, j++) {
-      int mods = fields->ushort_at(i + instanceKlass::access_flags_offset) & JVM_RECOGNIZED_FIELD_MODIFIERS;
-      if (mods & JVM_ACC_PUBLIC) ++num_fields;
+    for (JavaFieldStream fs(k()); !fs.done(); fs.next()) {
+      if (fs.access_flags().is_public()) ++num_fields;
     }
   } else {
-    num_fields = fields_len / instanceKlass::next_offset;
+    num_fields = k->java_fields_count();
 
     if (k() == SystemDictionary::Throwable_klass()) {
       num_fields--;
@@ -1622,16 +1619,15 @@
 
   int out_idx = 0;
   fieldDescriptor fd;
-  for (int i = 0; i < fields_len; i += instanceKlass::next_offset) {
+  for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
     if (skip_backtrace) {
       // 4496456 skip java.lang.Throwable.backtrace
-      int offset = k->offset_from_fields(i);
+      int offset = fs.offset();
       if (offset == java_lang_Throwable::get_backtrace_offset()) continue;
     }
 
-    int mods = fields->ushort_at(i + instanceKlass::access_flags_offset) & JVM_RECOGNIZED_FIELD_MODIFIERS;
-    if (!publicOnly || (mods & JVM_ACC_PUBLIC)) {
-      fd.initialize(k(), i);
+    if (!publicOnly || fs.access_flags().is_public()) {
+      fd.initialize(k(), fs.index());
       oop field = Reflection::new_field(&fd, UseNewReflection, CHECK_NULL);
       result->obj_at_put(out_idx, field);
       ++out_idx;
@@ -2119,7 +2115,7 @@
   k = JvmtiThreadState::class_to_verify_considering_redefinition(k, thread);
   if (!Klass::cast(k)->oop_is_instance())
     return 0;
-  return instanceKlass::cast(k)->fields()->length() / instanceKlass::next_offset;
+  return instanceKlass::cast(k)->java_fields_count();
 JVM_END
 
 
@@ -2215,8 +2211,7 @@
   JVMWrapper("JVM_GetFieldIxModifiers");
   klassOop k = java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(cls));
   k = JvmtiThreadState::class_to_verify_considering_redefinition(k, thread);
-  typeArrayOop fields = instanceKlass::cast(k)->fields();
-  return fields->ushort_at(field_index * instanceKlass::next_offset + instanceKlass::access_flags_offset) & JVM_RECOGNIZED_FIELD_MODIFIERS;
+  return instanceKlass::cast(k)->field_access_flags(field_index) & JVM_RECOGNIZED_FIELD_MODIFIERS;
 JVM_END
 
 
@@ -2399,7 +2394,7 @@
 JVM_END
 
 
-JVM_QUICK_ENTRY(jint, JVM_GetCPFieldModifiers(JNIEnv *env, jclass cls, int cp_index, jclass called_cls))
+JVM_ENTRY(jint, JVM_GetCPFieldModifiers(JNIEnv *env, jclass cls, int cp_index, jclass called_cls))
   JVMWrapper("JVM_GetCPFieldModifiers");
   klassOop k = java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(cls));
   klassOop k_called = java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(called_cls));
@@ -2411,12 +2406,9 @@
     case JVM_CONSTANT_Fieldref: {
       Symbol* name      = cp->uncached_name_ref_at(cp_index);
       Symbol* signature = cp->uncached_signature_ref_at(cp_index);
-      typeArrayOop fields = instanceKlass::cast(k_called)->fields();
-      int fields_count = fields->length();
-      for (int i = 0; i < fields_count; i += instanceKlass::next_offset) {
-        if (cp_called->symbol_at(fields->ushort_at(i + instanceKlass::name_index_offset)) == name &&
-            cp_called->symbol_at(fields->ushort_at(i + instanceKlass::signature_index_offset)) == signature) {
-          return fields->ushort_at(i + instanceKlass::access_flags_offset) & JVM_RECOGNIZED_FIELD_MODIFIERS;
+      for (JavaFieldStream fs(k_called); !fs.done(); fs.next()) {
+        if (fs.name() == name && fs.signature() == signature) {
+          return fs.access_flags().as_short() & JVM_RECOGNIZED_FIELD_MODIFIERS;
         }
       }
       return -1;
@@ -4020,249 +4012,6 @@
 #endif
 
 
-//---------------------------------------------------------------------------
-//
-// Support for old native code-based reflection (pre-JDK 1.4)
-// Disabled by default in the product build.
-//
-// See reflection.hpp for information on SUPPORT_OLD_REFLECTION
-//
-//---------------------------------------------------------------------------
-
-#ifdef SUPPORT_OLD_REFLECTION
-
-JVM_ENTRY(jobjectArray, JVM_GetClassFields(JNIEnv *env, jclass cls, jint which))
-  JVMWrapper("JVM_GetClassFields");
-  JvmtiVMObjectAllocEventCollector oam;
-  oop mirror = JNIHandles::resolve_non_null(cls);
-  objArrayOop result = Reflection::reflect_fields(mirror, which, CHECK_NULL);
-  return (jobjectArray) JNIHandles::make_local(env, result);
-JVM_END
-
-
-JVM_ENTRY(jobjectArray, JVM_GetClassMethods(JNIEnv *env, jclass cls, jint which))
-  JVMWrapper("JVM_GetClassMethods");
-  JvmtiVMObjectAllocEventCollector oam;
-  oop mirror = JNIHandles::resolve_non_null(cls);
-  objArrayOop result = Reflection::reflect_methods(mirror, which, CHECK_NULL);
-  //%note jvm_r4
-  return (jobjectArray) JNIHandles::make_local(env, result);
-JVM_END
-
-
-JVM_ENTRY(jobjectArray, JVM_GetClassConstructors(JNIEnv *env, jclass cls, jint which))
-  JVMWrapper("JVM_GetClassConstructors");
-  JvmtiVMObjectAllocEventCollector oam;
-  oop mirror = JNIHandles::resolve_non_null(cls);
-  objArrayOop result = Reflection::reflect_constructors(mirror, which, CHECK_NULL);
-  //%note jvm_r4
-  return (jobjectArray) JNIHandles::make_local(env, result);
-JVM_END
-
-
-JVM_ENTRY(jobject, JVM_GetClassField(JNIEnv *env, jclass cls, jstring name, jint which))
-  JVMWrapper("JVM_GetClassField");
-  JvmtiVMObjectAllocEventCollector oam;
-  if (name == NULL) return NULL;
-  Handle str (THREAD, JNIHandles::resolve_non_null(name));
-
-  const char* cstr = java_lang_String::as_utf8_string(str());
-  TempNewSymbol field_name = SymbolTable::probe(cstr, (int)strlen(cstr));
-  if (field_name == NULL) {
-    THROW_0(vmSymbols::java_lang_NoSuchFieldException());
-  }
-
-  oop mirror = JNIHandles::resolve_non_null(cls);
-  oop result = Reflection::reflect_field(mirror, field_name, which, CHECK_NULL);
-  if (result == NULL) {
-    THROW_0(vmSymbols::java_lang_NoSuchFieldException());
-  }
-  return JNIHandles::make_local(env, result);
-JVM_END
-
-
-JVM_ENTRY(jobject, JVM_GetClassMethod(JNIEnv *env, jclass cls, jstring name, jobjectArray types, jint which))
-  JVMWrapper("JVM_GetClassMethod");
-  JvmtiVMObjectAllocEventCollector oam;
-  if (name == NULL) {
-    THROW_0(vmSymbols::java_lang_NullPointerException());
-  }
-  Handle str (THREAD, JNIHandles::resolve_non_null(name));
-
-  const char* cstr = java_lang_String::as_utf8_string(str());
-  TempNewSymbol method_name = SymbolTable::probe(cstr, (int)strlen(cstr));
-  if (method_name == NULL) {
-    THROW_0(vmSymbols::java_lang_NoSuchMethodException());
-  }
-
-  oop mirror = JNIHandles::resolve_non_null(cls);
-  objArrayHandle tarray (THREAD, objArrayOop(JNIHandles::resolve(types)));
-  oop result = Reflection::reflect_method(mirror, method_name, tarray,
-                                          which, CHECK_NULL);
-  if (result == NULL) {
-    THROW_0(vmSymbols::java_lang_NoSuchMethodException());
-  }
-  return JNIHandles::make_local(env, result);
-JVM_END
-
-
-JVM_ENTRY(jobject, JVM_GetClassConstructor(JNIEnv *env, jclass cls, jobjectArray types, jint which))
-  JVMWrapper("JVM_GetClassConstructor");
-  JvmtiVMObjectAllocEventCollector oam;
-  oop mirror = JNIHandles::resolve_non_null(cls);
-  objArrayHandle tarray (THREAD, objArrayOop(JNIHandles::resolve(types)));
-  oop result = Reflection::reflect_constructor(mirror, tarray, which, CHECK_NULL);
-  if (result == NULL) {
-    THROW_0(vmSymbols::java_lang_NoSuchMethodException());
-  }
-  return (jobject) JNIHandles::make_local(env, result);
-JVM_END
-
-
-// Instantiation ///////////////////////////////////////////////////////////////////////////////
-
-JVM_ENTRY(jobject, JVM_NewInstance(JNIEnv *env, jclass cls))
-  JVMWrapper("JVM_NewInstance");
-  Handle mirror(THREAD, JNIHandles::resolve_non_null(cls));
-
-  methodOop resolved_constructor = java_lang_Class::resolved_constructor(mirror());
-  if (resolved_constructor == NULL) {
-    klassOop k = java_lang_Class::as_klassOop(mirror());
-    // The java.lang.Class object caches a resolved constructor if all the checks
-    // below were done successfully and a constructor was found.
-
-    // Do class based checks
-    if (java_lang_Class::is_primitive(mirror())) {
-      const char* msg = "";
-      if      (mirror == Universe::bool_mirror())   msg = "java/lang/Boolean";
-      else if (mirror == Universe::char_mirror())   msg = "java/lang/Character";
-      else if (mirror == Universe::float_mirror())  msg = "java/lang/Float";
-      else if (mirror == Universe::double_mirror()) msg = "java/lang/Double";
-      else if (mirror == Universe::byte_mirror())   msg = "java/lang/Byte";
-      else if (mirror == Universe::short_mirror())  msg = "java/lang/Short";
-      else if (mirror == Universe::int_mirror())    msg = "java/lang/Integer";
-      else if (mirror == Universe::long_mirror())   msg = "java/lang/Long";
-      THROW_MSG_0(vmSymbols::java_lang_NullPointerException(), msg);
-    }
-
-    // Check whether we are allowed to instantiate this class
-    Klass::cast(k)->check_valid_for_instantiation(false, CHECK_NULL); // Array classes get caught here
-    instanceKlassHandle klass(THREAD, k);
-    // Make sure class is initialized (also so all methods are rewritten)
-    klass->initialize(CHECK_NULL);
-
-    // Lookup default constructor
-    resolved_constructor = klass->find_method(vmSymbols::object_initializer_name(), vmSymbols::void_method_signature());
-    if (resolved_constructor == NULL) {
-      ResourceMark rm(THREAD);
-      THROW_MSG_0(vmSymbols::java_lang_InstantiationException(), klass->external_name());
-    }
-
-    // Cache result in java.lang.Class object. Does not have to be MT safe.
-    java_lang_Class::set_resolved_constructor(mirror(), resolved_constructor);
-  }
-
-  assert(resolved_constructor != NULL, "sanity check");
-  methodHandle constructor = methodHandle(THREAD, resolved_constructor);
-
-  // We have an initialized instanceKlass with a default constructor
-  instanceKlassHandle klass(THREAD, java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(cls)));
-  assert(klass->is_initialized() || klass->is_being_initialized(), "sanity check");
-
-  // Do security check
-  klassOop caller_klass = NULL;
-  if (UsePrivilegedStack) {
-    caller_klass = thread->security_get_caller_class(2);
-
-    if (!Reflection::verify_class_access(caller_klass, klass(), false) ||
-        !Reflection::verify_field_access(caller_klass,
-                                         klass(),
-                                         klass(),
-                                         constructor->access_flags(),
-                                         false,
-                                         true)) {
-      ResourceMark rm(THREAD);
-      THROW_MSG_0(vmSymbols::java_lang_IllegalAccessException(), klass->external_name());
-    }
-  }
-
-  // Allocate object and call constructor
-  Handle receiver = klass->allocate_instance_handle(CHECK_NULL);
-  JavaCalls::call_default_constructor(thread, constructor, receiver, CHECK_NULL);
-
-  jobject res = JNIHandles::make_local(env, receiver());
-  if (JvmtiExport::should_post_vm_object_alloc()) {
-    JvmtiExport::post_vm_object_alloc(JavaThread::current(), receiver());
-  }
-  return res;
-JVM_END
-
-
-// Field ////////////////////////////////////////////////////////////////////////////////////////////
-
-JVM_ENTRY(jobject, JVM_GetField(JNIEnv *env, jobject field, jobject obj))
-  JVMWrapper("JVM_GetField");
-  JvmtiVMObjectAllocEventCollector oam;
-  Handle field_mirror(thread, JNIHandles::resolve(field));
-  Handle receiver    (thread, JNIHandles::resolve(obj));
-  fieldDescriptor fd;
-  Reflection::resolve_field(field_mirror, receiver, &fd, false, CHECK_NULL);
-  jvalue value;
-  BasicType type = Reflection::field_get(&value, &fd, receiver);
-  oop box = Reflection::box(&value, type, CHECK_NULL);
-  return JNIHandles::make_local(env, box);
-JVM_END
-
-
-JVM_ENTRY(jvalue, JVM_GetPrimitiveField(JNIEnv *env, jobject field, jobject obj, unsigned char wCode))
-  JVMWrapper("JVM_GetPrimitiveField");
-  Handle field_mirror(thread, JNIHandles::resolve(field));
-  Handle receiver    (thread, JNIHandles::resolve(obj));
-  fieldDescriptor fd;
-  jvalue value;
-  value.j = 0;
-  Reflection::resolve_field(field_mirror, receiver, &fd, false, CHECK_(value));
-  BasicType type = Reflection::field_get(&value, &fd, receiver);
-  BasicType wide_type = (BasicType) wCode;
-  if (type != wide_type) {
-    Reflection::widen(&value, type, wide_type, CHECK_(value));
-  }
-  return value;
-JVM_END // should really be JVM_END, but that doesn't work for union types!
-
-
-JVM_ENTRY(void, JVM_SetField(JNIEnv *env, jobject field, jobject obj, jobject val))
-  JVMWrapper("JVM_SetField");
-  Handle field_mirror(thread, JNIHandles::resolve(field));
-  Handle receiver    (thread, JNIHandles::resolve(obj));
-  oop box = JNIHandles::resolve(val);
-  fieldDescriptor fd;
-  Reflection::resolve_field(field_mirror, receiver, &fd, true, CHECK);
-  BasicType field_type = fd.field_type();
-  jvalue value;
-  BasicType value_type;
-  if (field_type == T_OBJECT || field_type == T_ARRAY) {
-    // Make sure we do no unbox e.g. java/lang/Integer instances when storing into an object array
-    value_type = Reflection::unbox_for_regular_object(box, &value);
-    Reflection::field_set(&value, &fd, receiver, field_type, CHECK);
-  } else {
-    value_type = Reflection::unbox_for_primitive(box, &value, CHECK);
-    Reflection::field_set(&value, &fd, receiver, value_type, CHECK);
-  }
-JVM_END
-
-
-JVM_ENTRY(void, JVM_SetPrimitiveField(JNIEnv *env, jobject field, jobject obj, jvalue v, unsigned char vCode))
-  JVMWrapper("JVM_SetPrimitiveField");
-  Handle field_mirror(thread, JNIHandles::resolve(field));
-  Handle receiver    (thread, JNIHandles::resolve(obj));
-  fieldDescriptor fd;
-  Reflection::resolve_field(field_mirror, receiver, &fd, true, CHECK);
-  BasicType value_type = (BasicType) vCode;
-  Reflection::field_set(&v, &fd, receiver, value_type, CHECK);
-JVM_END
-
-
 // Method ///////////////////////////////////////////////////////////////////////////////////////////
 
 JVM_ENTRY(jobject, JVM_InvokeMethod(JNIEnv *env, jobject method, jobject obj, jobjectArray args0))
@@ -4302,8 +4051,6 @@
   return res;
 JVM_END
 
-#endif /* SUPPORT_OLD_REFLECTION */
-
 // Atomic ///////////////////////////////////////////////////////////////////////////////////////////
 
 JVM_LEAF(jboolean, JVM_SupportsCX8())
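
A minimal sketch (not part of the patch) of the JavaFieldStream idiom that replaces the raw fields() array walks in this file: the stream hides the per-field record layout (name, signature, access flags, offset, initial value, generic signature) behind accessors.

  // Sketch only; k is an instanceKlass handle/oop as in the hunks above.
  for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
    if (fs.access_flags().is_public()) {
      Symbol* name = fs.name();   // or k->constants()->symbol_at(fs.name_index())
      int     off  = fs.offset();
      // fs.signature_index(), fs.initval_index(), fs.generic_signature_index(), ...
    }
  }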
--- a/src/share/vm/prims/jvm.h	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/prims/jvm.h	Thu Dec 22 15:46:11 2011 +0000
@@ -26,7 +26,6 @@
 #define SHARE_VM_PRIMS_JVM_H
 
 #include "prims/jni.h"
-#include "runtime/reflectionCompat.hpp"
 #ifdef TARGET_OS_FAMILY_linux
 # include "jvm_linux.h"
 #endif
@@ -43,8 +42,7 @@
 // HotSpot integration note:
 //
 // This file and jvm.h used with the JDK are identical,
-// except for the three includes removed below and the
-// SUPPORT_OLD_REFLECTION sections cut out of the JDK's jvm.h.
+// except for the three includes removed below
 
 // #include <sys/stat.h>
 // #include "jni.h"
@@ -443,14 +441,6 @@
                               jsize len, jobject pd, const char *source,
                               jboolean verify);
 
-/* Define a class with a source (MLVM) */
-JNIEXPORT jclass JNICALL
-JVM_DefineClassWithCP(JNIEnv *env, const char *name, jobject loader,
-                      const jbyte *buf, jsize len, jobject pd,
-                      const char *source,
-                      // same args as JVM_DefineClassWithSource to this point
-                      jobjectArray constants);
-
 /*
  * Reflection support functions
  */
@@ -1442,65 +1432,6 @@
 JNIEXPORT void JNICALL
 JVM_RawMonitorExit(void *mon);
 
-
-#ifdef SUPPORT_OLD_REFLECTION
-
-/*
- * Support for old native code-based (pre-JDK 1.4) reflection implementation.
- * Disabled by default in the product build.
- *
- * See reflection.hpp for information on SUPPORT_OLD_REFLECTION
- */
-
-/*
- * reflecting fields and methods.
- * which: 0 --- MEMBER_PUBLIC
- *        1 --- MEMBER_DECLARED
- * NOTE: absent in product build by default
- */
-
-JNIEXPORT jobjectArray JNICALL
-JVM_GetClassFields(JNIEnv *env, jclass cls, jint which);
-
-JNIEXPORT jobjectArray JNICALL
-JVM_GetClassMethods(JNIEnv *env, jclass cls, jint which);
-
-JNIEXPORT jobjectArray JNICALL
-JVM_GetClassConstructors(JNIEnv *env, jclass cls, jint which);
-
-JNIEXPORT jobject JNICALL
-JVM_GetClassField(JNIEnv *env, jclass cls, jstring name, jint which);
-
-JNIEXPORT jobject JNICALL
-JVM_GetClassMethod(JNIEnv *env, jclass cls, jstring name, jobjectArray types,
-                   jint which);
-JNIEXPORT jobject JNICALL
-JVM_GetClassConstructor(JNIEnv *env, jclass cls, jobjectArray types,
-                        jint which);
-
-/*
- * Implements Class.newInstance
- */
-JNIEXPORT jobject JNICALL
-JVM_NewInstance(JNIEnv *env, jclass cls);
-
-/*
- * java.lang.reflect.Field
- */
-JNIEXPORT jobject JNICALL
-JVM_GetField(JNIEnv *env, jobject field, jobject obj);
-
-JNIEXPORT jvalue JNICALL
-JVM_GetPrimitiveField(JNIEnv *env, jobject field, jobject obj,
-                      unsigned char wCode);
-
-JNIEXPORT void JNICALL
-JVM_SetField(JNIEnv *env, jobject field, jobject obj, jobject val);
-
-JNIEXPORT void JNICALL
-JVM_SetPrimitiveField(JNIEnv *env, jobject field, jobject obj, jvalue v,
-                      unsigned char vCode);
-
 /*
  * java.lang.reflect.Method
  */
@@ -1513,8 +1444,6 @@
 JNIEXPORT jobject JNICALL
 JVM_NewInstanceFromConstructor(JNIEnv *env, jobject c, jobjectArray args0);
 
-#endif /* SUPPORT_OLD_REFLECTION */
-
 /*
  * java.lang.management support
  */
@@ -1650,7 +1579,8 @@
      */
     unsigned int thread_park_blocker : 1;
     unsigned int post_vm_init_hook_enabled : 1;
-    unsigned int : 30;
+    unsigned int pending_list_uses_discovered_field : 1;
+    unsigned int : 29;
     unsigned int : 32;
     unsigned int : 32;
 } jdk_version_info;
--- a/src/share/vm/prims/jvmtiClassFileReconstituter.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/prims/jvmtiClassFileReconstituter.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "classfile/symbolTable.hpp"
 #include "interpreter/bytecodeStream.hpp"
+#include "oops/fieldStreams.hpp"
 #include "prims/jvmtiClassFileReconstituter.hpp"
 #include "runtime/signature.hpp"
 #ifdef TARGET_ARCH_x86
@@ -52,25 +53,22 @@
 // JVMSpec|     field_info fields[fields_count];
 void JvmtiClassFileReconstituter::write_field_infos() {
   HandleMark hm(thread());
-  typeArrayHandle fields(thread(), ikh()->fields());
-  int fields_length = fields->length();
-  int num_fields = fields_length / instanceKlass::next_offset;
   objArrayHandle fields_anno(thread(), ikh()->fields_annotations());
 
-  write_u2(num_fields);
-  for (int index = 0; index < fields_length; index += instanceKlass::next_offset) {
-    AccessFlags access_flags;
-    int flags = fields->ushort_at(index + instanceKlass::access_flags_offset);
-    access_flags.set_flags(flags);
-    int name_index = fields->ushort_at(index + instanceKlass::name_index_offset);
-    int signature_index = fields->ushort_at(index + instanceKlass::signature_index_offset);
-    int initial_value_index = fields->ushort_at(index + instanceKlass::initval_index_offset);
+  // Compute the real number of Java fields
+  int java_fields = ikh()->java_fields_count();
+
+  write_u2(java_fields);
+  for (JavaFieldStream fs(ikh()); !fs.done(); fs.next()) {
+    AccessFlags access_flags = fs.access_flags();
+    int name_index = fs.name_index();
+    int signature_index = fs.signature_index();
+    int initial_value_index = fs.initval_index();
     guarantee(name_index != 0 && signature_index != 0, "bad constant pool index for field");
-    int offset = ikh()->offset_from_fields( index );
-    int generic_signature_index =
-                        fields->ushort_at(index + instanceKlass::generic_signature_offset);
+    // int offset = ikh()->field_offset( index );
+    int generic_signature_index = fs.generic_signature_index();
     typeArrayHandle anno(thread(), fields_anno.not_null() ?
-                                 (typeArrayOop)(fields_anno->obj_at(index / instanceKlass::next_offset)) :
+                                 (typeArrayOop)(fields_anno->obj_at(fs.index())) :
                                  (typeArrayOop)NULL);
 
     // JVMSpec|   field_info {
@@ -81,7 +79,7 @@
     // JVMSpec|         attribute_info attributes[attributes_count];
     // JVMSpec|   }
 
-    write_u2(flags & JVM_RECOGNIZED_FIELD_MODIFIERS);
+    write_u2(access_flags.as_int() & JVM_RECOGNIZED_FIELD_MODIFIERS);
     write_u2(name_index);
     write_u2(signature_index);
     int attr_count = 0;
--- a/src/share/vm/prims/jvmtiEnv.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/prims/jvmtiEnv.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -2044,7 +2044,6 @@
   // make sure we haven't set this watch before
   if (fdesc_ptr->is_field_access_watched()) return JVMTI_ERROR_DUPLICATE;
   fdesc_ptr->set_is_field_access_watched(true);
-  update_klass_field_access_flag(fdesc_ptr);
 
   JvmtiEventController::change_field_watch(JVMTI_EVENT_FIELD_ACCESS, true);
 
@@ -2057,7 +2056,6 @@
   // make sure we have a watch to clear
   if (!fdesc_ptr->is_field_access_watched()) return JVMTI_ERROR_NOT_FOUND;
   fdesc_ptr->set_is_field_access_watched(false);
-  update_klass_field_access_flag(fdesc_ptr);
 
   JvmtiEventController::change_field_watch(JVMTI_EVENT_FIELD_ACCESS, false);
 
@@ -2070,7 +2068,6 @@
   // make sure we haven't set this watch before
   if (fdesc_ptr->is_field_modification_watched()) return JVMTI_ERROR_DUPLICATE;
   fdesc_ptr->set_is_field_modification_watched(true);
-  update_klass_field_access_flag(fdesc_ptr);
 
   JvmtiEventController::change_field_watch(JVMTI_EVENT_FIELD_MODIFICATION, true);
 
@@ -2083,7 +2080,6 @@
    // make sure we have a watch to clear
   if (!fdesc_ptr->is_field_modification_watched()) return JVMTI_ERROR_NOT_FOUND;
   fdesc_ptr->set_is_field_modification_watched(false);
-  update_klass_field_access_flag(fdesc_ptr);
 
   JvmtiEventController::change_field_watch(JVMTI_EVENT_FIELD_MODIFICATION, false);
 
--- a/src/share/vm/prims/jvmtiEnvBase.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/prims/jvmtiEnvBase.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -565,15 +565,6 @@
 }
 
 
-// update the access_flags for the field in the klass
-void
-JvmtiEnvBase::update_klass_field_access_flag(fieldDescriptor *fd) {
-  instanceKlass* ik = instanceKlass::cast(fd->field_holder());
-  typeArrayOop fields = ik->fields();
-  fields->ushort_at_put(fd->index(), (jushort)fd->access_flags().as_short());
-}
-
-
 // return the vframe on the specified thread and depth, NULL if no such frame
 vframe*
 JvmtiEnvBase::vframeFor(JavaThread* java_thread, jint depth) {
--- a/src/share/vm/prims/jvmtiEnvBase.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/prims/jvmtiEnvBase.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -267,8 +267,6 @@
   // convert to a jni jclass from a non-null klassOop
   jclass get_jni_class_non_null(klassOop k);
 
-  void update_klass_field_access_flag(fieldDescriptor *fd);
-
   jint count_locked_objects(JavaThread *java_thread, Handle hobj);
   jvmtiError get_locked_objects_in_frame(JavaThread *calling_thread,
                                    JavaThread* java_thread,
--- a/src/share/vm/prims/jvmtiExport.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/prims/jvmtiExport.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -66,6 +66,7 @@
 // This class contains the JVMTI interface for the rest of hotspot.
 //
 class JvmtiExport : public AllStatic {
+  friend class VMStructs;
  private:
   static int         _field_access_count;
   static int         _field_modification_count;
--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -30,6 +30,7 @@
 #include "interpreter/rewriter.hpp"
 #include "memory/gcLocker.hpp"
 #include "memory/universe.inline.hpp"
+#include "oops/fieldStreams.hpp"
 #include "oops/klassVtable.hpp"
 #include "prims/jvmtiImpl.hpp"
 #include "prims/jvmtiRedefineClasses.hpp"
@@ -551,41 +552,35 @@
 
   // Check if the number, names, types and order of fields declared in these classes
   // are the same.
-  typeArrayOop k_old_fields = the_class->fields();
-  typeArrayOop k_new_fields = scratch_class->fields();
-  int n_fields = k_old_fields->length();
-  if (n_fields != k_new_fields->length()) {
-    return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED;
-  }
-
-  for (i = 0; i < n_fields; i += instanceKlass::next_offset) {
+  JavaFieldStream old_fs(the_class);
+  JavaFieldStream new_fs(scratch_class);
+  for (; !old_fs.done() && !new_fs.done(); old_fs.next(), new_fs.next()) {
     // access
-    old_flags = k_old_fields->ushort_at(i + instanceKlass::access_flags_offset);
-    new_flags = k_new_fields->ushort_at(i + instanceKlass::access_flags_offset);
+    old_flags = old_fs.access_flags().as_short();
+    new_flags = new_fs.access_flags().as_short();
     if ((old_flags ^ new_flags) & JVM_RECOGNIZED_FIELD_MODIFIERS) {
       return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED;
     }
     // offset
-    if (k_old_fields->short_at(i + instanceKlass::low_offset) !=
-        k_new_fields->short_at(i + instanceKlass::low_offset) ||
-        k_old_fields->short_at(i + instanceKlass::high_offset) !=
-        k_new_fields->short_at(i + instanceKlass::high_offset)) {
+    if (old_fs.offset() != new_fs.offset()) {
       return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED;
     }
     // name and signature
-    jshort name_index = k_old_fields->short_at(i + instanceKlass::name_index_offset);
-    jshort sig_index = k_old_fields->short_at(i +instanceKlass::signature_index_offset);
-    Symbol* name_sym1 = the_class->constants()->symbol_at(name_index);
-    Symbol* sig_sym1 = the_class->constants()->symbol_at(sig_index);
-    name_index = k_new_fields->short_at(i + instanceKlass::name_index_offset);
-    sig_index = k_new_fields->short_at(i + instanceKlass::signature_index_offset);
-    Symbol* name_sym2 = scratch_class->constants()->symbol_at(name_index);
-    Symbol* sig_sym2 = scratch_class->constants()->symbol_at(sig_index);
+    Symbol* name_sym1 = the_class->constants()->symbol_at(old_fs.name_index());
+    Symbol* sig_sym1 = the_class->constants()->symbol_at(old_fs.signature_index());
+    Symbol* name_sym2 = scratch_class->constants()->symbol_at(new_fs.name_index());
+    Symbol* sig_sym2 = scratch_class->constants()->symbol_at(new_fs.signature_index());
     if (name_sym1 != name_sym2 || sig_sym1 != sig_sym2) {
       return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED;
     }
   }
 
+  // If either stream is not yet done, then the old and new classes declare
+  // a differing number of fields.
+  if (!old_fs.done() || !new_fs.done()) {
+    return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED;
+  }
+
   // Do a parallel walk through the old and new methods. Detect
   // cases where they match (exist in both), have been added in
   // the new methods, or have been deleted (exist only in the
@@ -2369,38 +2364,34 @@
   int i;  // for portability
 
   // update each field in klass to use new constant pool indices as needed
-  typeArrayHandle fields(THREAD, scratch_class->fields());
-  int n_fields = fields->length();
-  for (i = 0; i < n_fields; i += instanceKlass::next_offset) {
-    jshort cur_index = fields->short_at(i + instanceKlass::name_index_offset);
+  for (JavaFieldStream fs(scratch_class); !fs.done(); fs.next()) {
+    jshort cur_index = fs.name_index();
     jshort new_index = find_new_index(cur_index);
     if (new_index != 0) {
       RC_TRACE_WITH_THREAD(0x00080000, THREAD,
         ("field-name_index change: %d to %d", cur_index, new_index));
-      fields->short_at_put(i + instanceKlass::name_index_offset, new_index);
+      fs.set_name_index(new_index);
     }
-    cur_index = fields->short_at(i + instanceKlass::signature_index_offset);
+    cur_index = fs.signature_index();
     new_index = find_new_index(cur_index);
     if (new_index != 0) {
       RC_TRACE_WITH_THREAD(0x00080000, THREAD,
         ("field-signature_index change: %d to %d", cur_index, new_index));
-      fields->short_at_put(i + instanceKlass::signature_index_offset,
-        new_index);
+      fs.set_signature_index(new_index);
     }
-    cur_index = fields->short_at(i + instanceKlass::initval_index_offset);
+    cur_index = fs.initval_index();
     new_index = find_new_index(cur_index);
     if (new_index != 0) {
       RC_TRACE_WITH_THREAD(0x00080000, THREAD,
         ("field-initval_index change: %d to %d", cur_index, new_index));
-      fields->short_at_put(i + instanceKlass::initval_index_offset, new_index);
+      fs.set_initval_index(new_index);
     }
-    cur_index = fields->short_at(i + instanceKlass::generic_signature_offset);
+    cur_index = fs.generic_signature_index();
     new_index = find_new_index(cur_index);
     if (new_index != 0) {
       RC_TRACE_WITH_THREAD(0x00080000, THREAD,
         ("field-generic_signature change: %d to %d", cur_index, new_index));
-      fields->short_at_put(i + instanceKlass::generic_signature_offset,
-        new_index);
+      fs.set_generic_signature_index(new_index);
     }
   } // end for each field
 
--- a/src/share/vm/prims/jvmtiTagMap.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/prims/jvmtiTagMap.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1647,6 +1647,7 @@
   // saved headers
   static GrowableArray<oop>* _saved_oop_stack;
   static GrowableArray<markOop>* _saved_mark_stack;
+  static bool _needs_reset;                  // do we need to reset mark bits?
 
  public:
   static void init();                       // initialize
@@ -1654,10 +1655,14 @@
 
   static inline void mark(oop o);           // mark an object
   static inline bool visited(oop o);        // check if object has been visited
+
+  static inline bool needs_reset()            { return _needs_reset; }
+  static inline void set_needs_reset(bool v)  { _needs_reset = v; }
 };
 
 GrowableArray<oop>* ObjectMarker::_saved_oop_stack = NULL;
 GrowableArray<markOop>* ObjectMarker::_saved_mark_stack = NULL;
+bool ObjectMarker::_needs_reset = true;  // need to reset mark bits by default
 
 // initialize ObjectMarker - prepares for object marking
 void ObjectMarker::init() {
@@ -1680,7 +1685,13 @@
   // iterate over all objects and restore the mark bits to
   // their initial value
   RestoreMarksClosure blk;
-  Universe::heap()->object_iterate(&blk);
+  if (needs_reset()) {
+    Universe::heap()->object_iterate(&blk);
+  } else {
+    // We don't need to reset mark bits on this call, but reset the
+    // flag to the default for the next call.
+    set_needs_reset(true);
+  }
 
   // When sharing is enabled we need to restore the headers of the objects
   // in the readwrite space too.
@@ -3023,7 +3034,8 @@
 }
 
 
-// collects all simple (non-stack) roots.
+// Collects all simple (non-stack) roots except for threads;
+// threads are handled in collect_stack_roots() as an optimization.
 // if there's a heap root callback provided then the callback is
 // invoked for each simple root.
 // if an object reference callback is provided then all simple
@@ -3054,16 +3066,7 @@
     return false;
   }
 
-  // Threads
-  for (JavaThread* thread = Threads::first(); thread != NULL ; thread = thread->next()) {
-    oop threadObj = thread->threadObj();
-    if (threadObj != NULL && !thread->is_exiting() && !thread->is_hidden_from_external_view()) {
-      bool cont = CallbackInvoker::report_simple_root(JVMTI_HEAP_REFERENCE_THREAD, threadObj);
-      if (!cont) {
-        return false;
-      }
-    }
-  }
+  // threads are now handled in collect_stack_roots()
 
   // Other kinds of roots maintained by HotSpot
   // Many of these won't be visible but others (such as instances of important
@@ -3175,13 +3178,20 @@
 }
 
 
-// collects all stack roots - for each thread it walks the execution
+// Collects the simple roots for all threads and collects all
+// stack roots - for each thread it walks the execution
 // stack to find all references and local JNI refs.
 inline bool VM_HeapWalkOperation::collect_stack_roots() {
   JNILocalRootsClosure blk;
   for (JavaThread* thread = Threads::first(); thread != NULL ; thread = thread->next()) {
     oop threadObj = thread->threadObj();
     if (threadObj != NULL && !thread->is_exiting() && !thread->is_hidden_from_external_view()) {
+      // Collect the simple root for this thread before we
+      // collect its stack roots
+      if (!CallbackInvoker::report_simple_root(JVMTI_HEAP_REFERENCE_THREAD,
+                                               threadObj)) {
+        return false;
+      }
       if (!collect_stack_roots(thread, &blk)) {
         return false;
       }
@@ -3235,8 +3245,20 @@
 
   // the heap walk starts with an initial object or the heap roots
   if (initial_object().is_null()) {
+    // If either collect_stack_roots() or collect_simple_roots()
+    // returns false at this point, then there are no mark bits
+    // to reset.
+    ObjectMarker::set_needs_reset(false);
+
+    // Calling collect_stack_roots() before collect_simple_roots()
+    // can result in a big performance boost for an agent that is
+    // focused on analyzing references in the thread stacks.
+    if (!collect_stack_roots()) return;
+
     if (!collect_simple_roots()) return;
-    if (!collect_stack_roots()) return;
+
+    // No early return occurred, so enable the heap traversal that resets the mark bits
+    ObjectMarker::set_needs_reset(true);
   } else {
     visit_stack()->push(initial_object()());
   }
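
A condensed sketch (not part of the patch) of the reordered heap walk above: collect_stack_roots() now runs first and also reports each thread's simple root, and ObjectMarker::_needs_reset lets an early return skip the whole-heap pass that restores mark words.

  // Sketch of the new control flow only.
  ObjectMarker::set_needs_reset(false);       // nothing marked yet, nothing to restore
  if (!collect_stack_roots())  return;        // thread roots + stack/JNI-local roots
  if (!collect_simple_roots()) return;        // JNI globals, system classes, monitors, ...
  ObjectMarker::set_needs_reset(true);        // full traversal follows; marks must be reset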
--- a/src/share/vm/prims/methodHandleWalk.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/prims/methodHandleWalk.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -182,10 +182,6 @@
   HandleMark hm;
   ResourceMark rm;
   Handle mh(m);
-  print(mh);
-}
-
-void MethodHandleChain::print(Handle mh) {
   EXCEPTION_MARK;
   MethodHandleChain mhc(mh, THREAD);
   if (HAS_PENDING_EXCEPTION) {
@@ -222,16 +218,33 @@
       if (o != NULL) {
         if (o->is_instance()) {
           tty->print(" instance %s", o->klass()->klass_part()->internal_name());
+          if (java_lang_invoke_CountingMethodHandle::is_instance(o)) {
+            tty->print(" vmcount: %d", java_lang_invoke_CountingMethodHandle::vmcount(o));
+          }
         } else {
           o->print();
         }
       }
+      oop vmt = chain.vmtarget_oop();
+      if (vmt != NULL) {
+        if (vmt->is_method()) {
+          tty->print(" ");
+          methodOop(vmt)->print_short_name(tty);
+        } else if (java_lang_invoke_MethodHandle::is_instance(vmt)) {
+          tty->print(" method handle " INTPTR_FORMAT, vmt);
+        } else {
+          ShouldNotReachHere();
+        }
+      }
     } else if (chain.is_adapter()) {
       tty->print("adapter: arg_slot %d conversion op %s",
                  chain.adapter_arg_slot(),
                  adapter_op_to_string(chain.adapter_conversion_op()));
       switch (chain.adapter_conversion_op()) {
         case java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY:
+          if (java_lang_invoke_CountingMethodHandle::is_instance(chain.method_handle_oop())) {
+            tty->print(" vmcount: %d", java_lang_invoke_CountingMethodHandle::vmcount(chain.method_handle_oop()));
+          }
         case java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW:
         case java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST:
         case java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM:
@@ -907,7 +920,10 @@
     _non_bcp_klasses(THREAD, 5),
     _cur_stack(0),
     _max_stack(0),
-    _rtype(T_ILLEGAL)
+    _rtype(T_ILLEGAL),
+    _selectAlternative_bci(-1),
+    _taken_count(0),
+    _not_taken_count(0)
 {
 
   // Element zero is always the null constant.
@@ -1115,11 +1131,50 @@
     _bytecode.push(0);
     break;
 
+  case Bytecodes::_ifeq:
+    assert((unsigned short) index == index, "index does not fit in 16-bit");
+    _bytecode.push(op);
+    _bytecode.push(index >> 8);
+    _bytecode.push(index);
+    break;
+
   default:
     ShouldNotReachHere();
   }
 }
 
+void MethodHandleCompiler::update_branch_dest(int src, int dst) {
+  switch (_bytecode.at(src)) {
+    case Bytecodes::_ifeq:
+      dst -= src; // compute the offset
+      assert((unsigned short) dst == dst, "index does not fit in 16-bit");
+      _bytecode.at_put(src + 1, dst >> 8);
+      _bytecode.at_put(src + 2, dst);
+      break;
+    default:
+      ShouldNotReachHere();
+  }
+}
+
+void MethodHandleCompiler::emit_load(ArgToken arg) {
+  TokenType tt = arg.token_type();
+  BasicType bt = arg.basic_type();
+
+  switch (tt) {
+    case tt_parameter:
+    case tt_temporary:
+      emit_load(bt, arg.index());
+      break;
+    case tt_constant:
+      emit_load_constant(arg);
+      break;
+    case tt_illegal:
+    case tt_void:
+    default:
+      ShouldNotReachHere();
+  }
+}
+
 
 void MethodHandleCompiler::emit_load(BasicType bt, int index) {
   if (index <= 3) {
@@ -1318,6 +1373,27 @@
 jvalue MethodHandleCompiler::zero_jvalue = { 0 };
 jvalue MethodHandleCompiler::one_jvalue  = { 1 };
 
+// Fetch any values from CountingMethodHandles and capture them for profiles
+bool MethodHandleCompiler::fetch_counts(ArgToken arg1, ArgToken arg2) {
+  int count1 = -1, count2 = -1;
+  if (arg1.token_type() == tt_constant && arg1.basic_type() == T_OBJECT &&
+      java_lang_invoke_CountingMethodHandle::is_instance(arg1.object()())) {
+    count1 = java_lang_invoke_CountingMethodHandle::vmcount(arg1.object()());
+  }
+  if (arg2.token_type() == tt_constant && arg2.basic_type() == T_OBJECT &&
+      java_lang_invoke_CountingMethodHandle::is_instance(arg2.object()())) {
+    count2 = java_lang_invoke_CountingMethodHandle::vmcount(arg2.object()());
+  }
+  int total = count1 + count2;
+  if (count1 != -1 && count2 != -1 && total != 0) {
+    // Normalize the collected counts to the invoke_count
+    if (count1 != 0) _not_taken_count = (int)(_invoke_count * count1 / (double)total);
+    if (count2 != 0) _taken_count = (int)(_invoke_count * count2 / (double)total);
+    return true;
+  }
+  return false;
+}
+
 // Emit bytecodes for the given invoke instruction.
 MethodHandleWalker::ArgToken
 MethodHandleCompiler::make_invoke(methodHandle m, vmIntrinsics::ID iid,
@@ -1367,6 +1443,29 @@
     }
   }
 
+  if (m->intrinsic_id() == vmIntrinsics::_selectAlternative &&
+      fetch_counts(argv[1], argv[2])) {
+    assert(argc == 3, "three arguments");
+    assert(tailcall, "only");
+
+    // Emit these bytecodes inline so we can drop profile data into them:
+    //   0:   iload_0
+    emit_load(argv[0]);
+    //   1:   ifeq    8
+    _selectAlternative_bci = _bytecode.length();
+    emit_bc(Bytecodes::_ifeq, 0); // emit placeholder offset
+    //   4:   aload_1
+    emit_load(argv[1]);
+    //   5:   areturn;
+    emit_bc(Bytecodes::_areturn);
+    //   8:   aload_2
+    update_branch_dest(_selectAlternative_bci, cur_bci());
+    emit_load(argv[2]);
+    //   9:   areturn
+    emit_bc(Bytecodes::_areturn);
+    return ArgToken();  // Dummy return value.
+  }
+
   check_non_bcp_klass(klass, CHECK_(zero));
   if (m->is_method_handle_invoke()) {
     check_non_bcp_klasses(m->method_handle_type(), CHECK_(zero));
@@ -1377,10 +1476,6 @@
   assert(argc == asc.size() + ((op == Bytecodes::_invokestatic || op == Bytecodes::_invokedynamic) ? 0 : 1),
          "argc mismatch");
 
-  // Inline the method.
-  InvocationCounter* ic = m->invocation_counter();
-  ic->set_carry_flag();
-
   for (int i = 0; i < argc; i++) {
     ArgToken arg = argv[i];
     TokenType tt = arg.token_type();
@@ -1686,7 +1781,7 @@
 }
 
 
-methodHandle MethodHandleCompiler::get_method_oop(TRAPS) const {
+methodHandle MethodHandleCompiler::get_method_oop(TRAPS) {
   methodHandle empty;
   // Create a method that holds the generated bytecode.  invokedynamic
   // has no receiver, normal MH calls do.
@@ -1765,6 +1860,7 @@
     assert(m->method_data() == NULL, "there should not be an MDO yet");
     m->set_method_data(mdo);
 
+    bool found_selectAlternative = false;
     // Iterate over all profile data and set the count of the counter
     // data entries to the original call site counter.
     for (ProfileData* profile_data = mdo->first_data();
@@ -1774,7 +1870,15 @@
         CounterData* counter_data = profile_data->as_CounterData();
         counter_data->set_count(_invoke_count);
       }
+      if (profile_data->is_BranchData() &&
+          profile_data->bci() == _selectAlternative_bci) {
+        BranchData* bd = profile_data->as_BranchData();
+        bd->set_taken(_taken_count);
+        bd->set_not_taken(_not_taken_count);
+        found_selectAlternative = true;
+      }
     }
+    assert(_selectAlternative_bci == -1 || found_selectAlternative, "must have found profile entry");
   }
 
 #ifndef PRODUCT
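
A worked note (not part of the patch) on the branch patch-up emitted for _selectAlternative above: the ifeq operand is a signed 16-bit offset relative to the ifeq opcode, so emit_bc() writes a placeholder and update_branch_dest() fills it in once the alternative path's bci is known.

  // Sketch only, using the bcis from the comments above (ifeq at src == 1,
  // second alternative at dst == 8): update_branch_dest(1, 8) stores
  // dst - src == 7 big-endian in the two operand bytes.
  //   _bytecode.at_put(src + 1, 7 >> 8);   // 0x00
  //   _bytecode.at_put(src + 2, 7);        // 0x07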
--- a/src/share/vm/prims/methodHandleWalk.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/prims/methodHandleWalk.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -74,6 +74,7 @@
     set_method_handle(MethodHandle_vmtarget_oop(), THREAD);
   }
 
+  Handle root()                 { return _root; }
   Handle method_handle()        { return _method_handle; }
   oop    method_handle_oop()    { return _method_handle(); }
   oop    method_type_oop()      { return MethodHandle_type_oop(); }
@@ -110,7 +111,6 @@
   // the signature for each method.  The signatures are printed in
   // slot order to make it easier to understand.
   void print();
-  static void print(Handle mh);
   static void print(oopDesc* mh);
 #endif
 };
@@ -277,6 +277,10 @@
   KlassHandle  _target_klass;
   Thread*      _thread;
 
+  int          _selectAlternative_bci; // These are used for capturing profiles from GWTs
+  int          _taken_count;
+  int          _not_taken_count;
+
   // Values used by the compiler.
   static jvalue zero_jvalue;
   static jvalue one_jvalue;
@@ -372,6 +376,7 @@
 
   unsigned char* bytecode()        const { return _bytecode.adr_at(0); }
   int            bytecode_length() const { return _bytecode.length(); }
+  int            cur_bci()         const { return _bytecode.length(); }
 
   // Fake constant pool.
   int cpool_oop_put(int tag, Handle con) {
@@ -436,6 +441,8 @@
   }
 
   void emit_bc(Bytecodes::Code op, int index = 0, int args_size = -1);
+  void update_branch_dest(int src, int dst);
+  void emit_load(ArgToken arg);
   void emit_load(BasicType bt, int index);
   void emit_store(BasicType bt, int index);
   void emit_load_constant(ArgToken arg);
@@ -455,11 +462,14 @@
   virtual ArgToken make_fetch(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& base, const ArgToken& offset, TRAPS);
   virtual ArgToken make_invoke(methodHandle m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, ArgToken* argv, TRAPS);
 
+  // Check for profiling information on a GWT and return true if it's found
+  bool fetch_counts(ArgToken a1, ArgToken a2);
+
   // Get a real constant pool.
   constantPoolHandle get_constant_pool(TRAPS) const;
 
   // Get a real methodOop.
-  methodHandle get_method_oop(TRAPS) const;
+  methodHandle get_method_oop(TRAPS);
 
 public:
   MethodHandleCompiler(Handle root, Symbol* name, Symbol* signature, int invoke_count, bool for_invokedynamic, TRAPS);
--- a/src/share/vm/prims/methodHandles.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/prims/methodHandles.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -158,6 +158,8 @@
   "adapter_fold/4/ref",
   "adapter_fold/5/ref",
 
+  "adapter_opt_profiling",
+
   NULL
 };
 
@@ -518,7 +520,7 @@
     int slot  = java_lang_reflect_Field::slot(target_oop);  // fd.index()
     int mods  = java_lang_reflect_Field::modifiers(target_oop);
     klassOop k = java_lang_Class::as_klassOop(clazz);
-    int offset = instanceKlass::cast(k)->offset_from_fields(slot);
+    int offset = instanceKlass::cast(k)->field_offset(slot);
     init_MemberName(mname_oop, k, accessFlags_from(mods), offset);
   } else {
     KlassHandle receiver_limit; int decode_flags = 0;
@@ -1016,7 +1018,7 @@
         && CompilationPolicy::can_be_compiled(m)) {
       // Force compilation
       CompileBroker::compile_method(m, InvocationEntryBci,
-                                    CompLevel_initial_compile,
+                                    CompilationPolicy::policy()->initial_compile_level(),
                                     methodHandle(), 0, "MethodHandleNatives.getTarget",
                                     CHECK_NULL);
     }
@@ -1630,8 +1632,6 @@
     THROW(vmSymbols::java_lang_InternalError());
   }
 
-  java_lang_invoke_MethodHandle::init_vmslots(mh());
-
   if (VerifyMethodHandles) {
     // The privileged code which invokes this routine should not make
     // a mistake about types, but it's better to verify.
@@ -1754,7 +1754,6 @@
   if (m.is_null())      { THROW(vmSymbols::java_lang_InternalError()); }
   if (m->is_abstract()) { THROW(vmSymbols::java_lang_AbstractMethodError()); }
 
-  java_lang_invoke_MethodHandle::init_vmslots(mh());
   int vmargslot = m->size_of_parameters() - 1;
   assert(java_lang_invoke_BoundMethodHandle::vmargslot(mh()) == vmargslot, "");
 
@@ -1860,7 +1859,6 @@
     THROW(vmSymbols::java_lang_InternalError());
   }
 
-  java_lang_invoke_MethodHandle::init_vmslots(mh());
   int argslot = java_lang_invoke_BoundMethodHandle::vmargslot(mh());
 
   if (VerifyMethodHandles) {
@@ -2653,6 +2651,11 @@
   // Finalize the conversion field.  (Note that it is final to Java code.)
   java_lang_invoke_AdapterMethodHandle::set_conversion(mh(), new_conversion);
 
+  if (java_lang_invoke_CountingMethodHandle::is_instance(mh())) {
+    assert(ek_orig == _adapter_retype_only, "only one handled");
+    ek_opt = _adapter_opt_profiling;
+  }
+
   // Done!
   java_lang_invoke_MethodHandle::set_vmentry(mh(), entry(ek_opt));
 
@@ -2679,6 +2682,7 @@
       java_lang_invoke_MethodTypeForm::init_vmlayout(mtform(), cookie);
     }
   }
+  assert(java_lang_invoke_MethodTypeForm::vmslots(mtform()) == argument_slot_count(mtype()), "must agree");
 }
 
 #ifdef ASSERT
@@ -2713,7 +2717,7 @@
         && CompilationPolicy::can_be_compiled(m)) {
       // Force compilation
       CompileBroker::compile_method(m, InvocationEntryBci,
-                                    CompLevel_initial_compile,
+                                    CompilationPolicy::policy()->initial_compile_level(),
                                     methodHandle(), 0, "StressMethodHandleWalk",
                                     CHECK);
     }
@@ -2905,8 +2909,12 @@
     return MethodHandles::stack_move_unit();
   case MethodHandles::GC_CONV_OP_IMPLEMENTED_MASK:
     return MethodHandles::adapter_conversion_ops_supported_mask();
-  case MethodHandles::GC_OP_ROT_ARGS_DOWN_LIMIT_BIAS:
-    return MethodHandles::OP_ROT_ARGS_DOWN_LIMIT_BIAS;
+  case MethodHandles::GC_COUNT_GWT:
+#ifdef COMPILER2
+    return true;
+#else
+    return false;
+#endif
   }
   return 0;
 }
@@ -3070,6 +3078,30 @@
 }
 JVM_END
 
+JVM_ENTRY(void, MHN_setCallSiteTargetNormal(JNIEnv* env, jobject igcls, jobject call_site_jh, jobject target_jh)) {
+  oop call_site = JNIHandles::resolve_non_null(call_site_jh);
+  oop target    = JNIHandles::resolve(target_jh);
+  {
+    // Walk all nmethods depending on this call site.
+    MutexLocker mu(Compile_lock, thread);
+    Universe::flush_dependents_on(call_site, target);
+  }
+  java_lang_invoke_CallSite::set_target(call_site, target);
+}
+JVM_END
+
+JVM_ENTRY(void, MHN_setCallSiteTargetVolatile(JNIEnv* env, jobject igcls, jobject call_site_jh, jobject target_jh)) {
+  oop call_site = JNIHandles::resolve_non_null(call_site_jh);
+  oop target    = JNIHandles::resolve(target_jh);
+  {
+    // Walk all nmethods depending on this call site.
+    MutexLocker mu(Compile_lock, thread);
+    Universe::flush_dependents_on(call_site, target);
+  }
+  java_lang_invoke_CallSite::set_target_volatile(call_site, target);
+}
+JVM_END
+
 methodOop MethodHandles::resolve_raise_exception_method(TRAPS) {
   if (_raise_exception_method != NULL) {
     // no need to do it twice
@@ -3126,12 +3158,15 @@
 
 /// JVM_RegisterMethodHandleMethods
 
+#undef CS  // Solaris builds complain
+
 #define LANG "Ljava/lang/"
 #define JLINV "Ljava/lang/invoke/"
 
 #define OBJ   LANG"Object;"
 #define CLS   LANG"Class;"
 #define STRG  LANG"String;"
+#define CS    JLINV"CallSite;"
 #define MT    JLINV"MethodType;"
 #define MH    JLINV"MethodHandle;"
 #define MEM   JLINV"MemberName;"
@@ -3142,29 +3177,34 @@
 #define CC (char*)  /*cast a literal from (const char*)*/
 #define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f)
 
-// These are the native methods on sun.invoke.MethodHandleNatives.
+// These are the native methods on java.lang.invoke.MethodHandleNatives.
 static JNINativeMethod methods[] = {
   // void init(MemberName self, AccessibleObject ref)
-  {CC"init",                    CC"("AMH""MH"I)V",              FN_PTR(MHN_init_AMH)},
-  {CC"init",                    CC"("BMH""OBJ"I)V",             FN_PTR(MHN_init_BMH)},
-  {CC"init",                    CC"("DMH""OBJ"Z"CLS")V",        FN_PTR(MHN_init_DMH)},
-  {CC"init",                    CC"("MT")V",                    FN_PTR(MHN_init_MT)},
-  {CC"init",                    CC"("MEM""OBJ")V",              FN_PTR(MHN_init_Mem)},
-  {CC"expand",                  CC"("MEM")V",                   FN_PTR(MHN_expand_Mem)},
-  {CC"resolve",                 CC"("MEM""CLS")V",              FN_PTR(MHN_resolve_Mem)},
-  {CC"getTarget",               CC"("MH"I)"OBJ,                 FN_PTR(MHN_getTarget)},
-  {CC"getConstant",             CC"(I)I",                       FN_PTR(MHN_getConstant)},
+  {CC"init",                      CC"("AMH""MH"I)V",                     FN_PTR(MHN_init_AMH)},
+  {CC"init",                      CC"("BMH""OBJ"I)V",                    FN_PTR(MHN_init_BMH)},
+  {CC"init",                      CC"("DMH""OBJ"Z"CLS")V",               FN_PTR(MHN_init_DMH)},
+  {CC"init",                      CC"("MT")V",                           FN_PTR(MHN_init_MT)},
+  {CC"init",                      CC"("MEM""OBJ")V",                     FN_PTR(MHN_init_Mem)},
+  {CC"expand",                    CC"("MEM")V",                          FN_PTR(MHN_expand_Mem)},
+  {CC"resolve",                   CC"("MEM""CLS")V",                     FN_PTR(MHN_resolve_Mem)},
+  {CC"getTarget",                 CC"("MH"I)"OBJ,                        FN_PTR(MHN_getTarget)},
+  {CC"getConstant",               CC"(I)I",                              FN_PTR(MHN_getConstant)},
   //  static native int getNamedCon(int which, Object[] name)
-  {CC"getNamedCon",             CC"(I["OBJ")I",                 FN_PTR(MHN_getNamedCon)},
+  {CC"getNamedCon",               CC"(I["OBJ")I",                        FN_PTR(MHN_getNamedCon)},
   //  static native int getMembers(Class<?> defc, String matchName, String matchSig,
   //          int matchFlags, Class<?> caller, int skip, MemberName[] results);
-  {CC"getMembers",              CC"("CLS""STRG""STRG"I"CLS"I["MEM")I",  FN_PTR(MHN_getMembers)}
+  {CC"getMembers",                CC"("CLS""STRG""STRG"I"CLS"I["MEM")I", FN_PTR(MHN_getMembers)}
+};
+
+static JNINativeMethod call_site_methods[] = {
+  {CC"setCallSiteTargetNormal",   CC"("CS""MH")V",                       FN_PTR(MHN_setCallSiteTargetNormal)},
+  {CC"setCallSiteTargetVolatile", CC"("CS""MH")V",                       FN_PTR(MHN_setCallSiteTargetVolatile)}
 };
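For reference, the signature strings above are assembled by string-literal pasting of the macros defined earlier in this hunk. A minimal standalone sketch of the expansion for the new call-site entries (illustrative only, not part of the VM sources):

    #define JLINV "Ljava/lang/invoke/"
    #define CS    JLINV"CallSite;"
    #define MH    JLINV"MethodHandle;"
    // CC"("CS""MH")V" pastes into this JNI descriptor:
    static const char* expanded = "(Ljava/lang/invoke/CallSite;Ljava/lang/invoke/MethodHandle;)V";

Both setCallSiteTargetNormal and setCallSiteTargetVolatile therefore take a CallSite and a MethodHandle and return void.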
 
 static JNINativeMethod invoke_methods[] = {
   // void init(MemberName self, AccessibleObject ref)
-  {CC"invoke",                  CC"(["OBJ")"OBJ,                FN_PTR(MH_invoke_UOE)},
-  {CC"invokeExact",             CC"(["OBJ")"OBJ,                FN_PTR(MH_invokeExact_UOE)}
+  {CC"invoke",                    CC"(["OBJ")"OBJ,                       FN_PTR(MH_invoke_UOE)},
+  {CC"invokeExact",               CC"(["OBJ")"OBJ,                       FN_PTR(MH_invokeExact_UOE)}
 };
 
 // This one function is exported, used by NativeLookup.
@@ -3177,11 +3217,11 @@
     return;  // bind nothing
   }
 
+  assert(!MethodHandles::enabled(), "must not be enabled");
   bool enable_MH = true;
 
   {
     ThreadToNativeFromVM ttnfv(thread);
-
     int status = env->RegisterNatives(MHN_class, methods, sizeof(methods)/sizeof(JNINativeMethod));
     if (!env->ExceptionOccurred()) {
       const char* L_MH_name = (JLINV "MethodHandle");
@@ -3190,11 +3230,16 @@
       status = env->RegisterNatives(MH_class, invoke_methods, sizeof(invoke_methods)/sizeof(JNINativeMethod));
     }
     if (env->ExceptionOccurred()) {
-      MethodHandles::set_enabled(false);
       warning("JSR 292 method handle code is mismatched to this JVM.  Disabling support.");
       enable_MH = false;
       env->ExceptionClear();
     }
+
+    status = env->RegisterNatives(MHN_class, call_site_methods, sizeof(call_site_methods)/sizeof(JNINativeMethod));
+    if (env->ExceptionOccurred()) {
+      // Exception is okay until 7087357
+      env->ExceptionClear();
+    }
   }
 
   if (enable_MH) {
--- a/src/share/vm/prims/methodHandles.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/prims/methodHandles.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -187,6 +187,8 @@
     _adapter_opt_fold_FIRST = _adapter_opt_fold_ref,
     _adapter_opt_fold_LAST  = _adapter_opt_fold_5_ref,
 
+    _adapter_opt_profiling,
+
     _EK_LIMIT,
     _EK_FIRST = 0
   };
@@ -266,6 +268,8 @@
       return _adapter_fold_args;
     if (ek >= _adapter_opt_return_FIRST && ek <= _adapter_opt_return_LAST)
       return _adapter_opt_return_any;
+    if (ek == _adapter_opt_profiling)
+      return _adapter_retype_only;
     assert(false, "oob");
     return _EK_LIMIT;
   }
@@ -511,11 +515,12 @@
   }
   // Here is the transformation the i2i adapter must perform:
   static int truncate_subword_from_vminfo(jint value, int vminfo) {
-    jint tem = value << vminfo;
+    int shift = vminfo & ~CONV_VMINFO_SIGN_FLAG;
+    jint tem = value << shift;
     if ((vminfo & CONV_VMINFO_SIGN_FLAG) != 0) {
-      return (jint)tem >> vminfo;
+      return (jint)tem >> shift;
     } else {
-      return (juint)tem >> vminfo;
+      return (juint)tem >> shift;
     }
   }
 
@@ -582,6 +587,7 @@
     GC_JVM_STACK_MOVE_UNIT = 1,
     GC_CONV_OP_IMPLEMENTED_MASK = 2,
     GC_OP_ROT_ARGS_DOWN_LIMIT_BIAS = 3,
+    GC_COUNT_GWT = 4,
 
     // format of result from getTarget / encode_target:
     ETF_HANDLE_OR_METHOD_NAME = 0, // all available data (immediate MH or method)
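A note on the truncate_subword_from_vminfo change above: vminfo carries both the shift distance and CONV_VMINFO_SIGN_FLAG, so shifting by the raw value used a flag-contaminated count whenever the flag was set; the fix masks the flag off before shifting and uses it only to choose between an arithmetic and a logical right shift. A minimal standalone sketch of the underlying shift-pair idiom, using byte truncation as an example (plain C++, not HotSpot code):

    #include <stdint.h>
    // Keep only the low 8 bits of v, then re-extend to 32 bits.
    int32_t  trunc_signed_byte  (int32_t v) { return (int32_t)((uint32_t)v << 24) >> 24; }  // sign-extends bit 7
    uint32_t trunc_unsigned_byte(int32_t v) { return ((uint32_t)v << 24) >> 24; }           // zero-extends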
--- a/src/share/vm/prims/unsafe.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/prims/unsafe.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -33,7 +33,6 @@
 #include "runtime/globals.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/reflection.hpp"
-#include "runtime/reflectionCompat.hpp"
 #include "runtime/synchronizer.hpp"
 #include "services/threadService.hpp"
 #include "utilities/copy.hpp"
@@ -303,6 +302,19 @@
   UnsafeWrapper("Unsafe_SetObjectVolatile");
   oop x = JNIHandles::resolve(x_h);
   oop p = JNIHandles::resolve(obj);
+  // Catch VolatileCallSite.target stores (via
+  // CallSite.setTargetVolatile) and check call site dependencies.
+  if ((offset == java_lang_invoke_CallSite::target_offset_in_bytes()) && p->is_a(SystemDictionary::CallSite_klass())) {
+    oop call_site     = p;
+    oop method_handle = x;
+    assert(call_site    ->is_a(SystemDictionary::CallSite_klass()),     "must be");
+    assert(method_handle->is_a(SystemDictionary::MethodHandle_klass()), "must be");
+    {
+      // Walk all nmethods depending on this call site.
+      MutexLocker mu(Compile_lock, thread);
+      Universe::flush_dependents_on(call_site, method_handle);
+    }
+  }
   void* addr = index_oop_from_field_offset_long(p, offset);
   OrderAccess::release();
   if (UseCompressedOops) {
@@ -707,7 +719,7 @@
     }
   }
 
-  int offset = instanceKlass::cast(k)->offset_from_fields(slot);
+  int offset = instanceKlass::cast(k)->field_offset(slot);
   return field_offset_from_byte_offset(offset);
 }
 
--- a/src/share/vm/runtime/advancedThresholdPolicy.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/advancedThresholdPolicy.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -171,7 +171,7 @@
       // If a method has been stale for some time, remove it from the queue.
       if (is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method)) {
         if (PrintTieredEvents) {
-          print_event(KILL, method, method, task->osr_bci(), (CompLevel)task->comp_level());
+          print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel)task->comp_level());
         }
         CompileTaskWrapper ctw(task); // Frees the task
         compile_queue->remove(task);
@@ -189,10 +189,11 @@
     task = next_task;
   }
 
-  if (max_task->comp_level() == CompLevel_full_profile && is_method_profiled(max_method)) {
+  if (max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile
+      && is_method_profiled(max_method)) {
     max_task->set_comp_level(CompLevel_limited_profile);
     if (PrintTieredEvents) {
-      print_event(UPDATE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
+      print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
     }
   }
 
@@ -259,6 +260,17 @@
   return false;
 }
 
+// Inlining control: if we're compiling a profiled method with C1 and the callee
+// is known to have OSRed in a C2 version, don't inline it.
+bool AdvancedThresholdPolicy::should_not_inline(ciEnv* env, ciMethod* callee) {
+  CompLevel comp_level = (CompLevel)env->comp_level();
+  if (comp_level == CompLevel_full_profile ||
+      comp_level == CompLevel_limited_profile) {
+    return callee->highest_osr_comp_level() == CompLevel_full_optimization;
+  }
+  return false;
+}
+
 // Create MDO if necessary.
 void AdvancedThresholdPolicy::create_mdo(methodHandle mh, TRAPS) {
   if (mh->is_native() || mh->is_abstract() || mh->is_accessor()) return;
@@ -310,76 +322,79 @@
  */
 
 // Common transition function. Given a predicate determines if a method should transition to another level.
-CompLevel AdvancedThresholdPolicy::common(Predicate p, methodOop method, CompLevel cur_level) {
-  if (is_trivial(method)) return CompLevel_simple;
-
+CompLevel AdvancedThresholdPolicy::common(Predicate p, methodOop method, CompLevel cur_level, bool disable_feedback) {
   CompLevel next_level = cur_level;
   int i = method->invocation_count();
   int b = method->backedge_count();
 
-  switch(cur_level) {
-  case CompLevel_none:
-    // If we were at full profile level, would we switch to full opt?
-    if (common(p, method, CompLevel_full_profile) == CompLevel_full_optimization) {
-      next_level = CompLevel_full_optimization;
-    } else if ((this->*p)(i, b, cur_level)) {
-      // C1-generated fully profiled code is about 30% slower than the limited profile
-      // code that has only invocation and backedge counters. The observation is that
-      // if C2 queue is large enough we can spend too much time in the fully profiled code
-      // while waiting for C2 to pick the method from the queue. To alleviate this problem
-      // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
-      // we choose to compile a limited profiled version and then recompile with full profiling
-      // when the load on C2 goes down.
-      if (CompileBroker::queue_size(CompLevel_full_optimization) >
-          Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
-        next_level = CompLevel_limited_profile;
-      } else {
-        next_level = CompLevel_full_profile;
-      }
-    }
-    break;
-  case CompLevel_limited_profile:
-    if (is_method_profiled(method)) {
-      // Special case: we got here because this method was fully profiled in the interpreter.
-      next_level = CompLevel_full_optimization;
-    } else {
-      methodDataOop mdo = method->method_data();
-      if (mdo != NULL) {
-        if (mdo->would_profile()) {
-          if (CompileBroker::queue_size(CompLevel_full_optimization) <=
-              Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
-              (this->*p)(i, b, cur_level)) {
-            next_level = CompLevel_full_profile;
-          }
+  if (is_trivial(method)) {
+    next_level = CompLevel_simple;
+  } else {
+    switch(cur_level) {
+    case CompLevel_none:
+      // If we were at full profile level, would we switch to full opt?
+      if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
+        next_level = CompLevel_full_optimization;
+      } else if ((this->*p)(i, b, cur_level)) {
+        // C1-generated fully profiled code is about 30% slower than the limited profile
+        // code that has only invocation and backedge counters. The observation is that
+        // if C2 queue is large enough we can spend too much time in the fully profiled code
+        // while waiting for C2 to pick the method from the queue. To alleviate this problem
+        // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
+        // we choose to compile a limited profiled version and then recompile with full profiling
+        // when the load on C2 goes down.
+        if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
+                                 Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
+          next_level = CompLevel_limited_profile;
         } else {
-          next_level = CompLevel_full_optimization;
+          next_level = CompLevel_full_profile;
         }
       }
-    }
-    break;
-  case CompLevel_full_profile:
-    {
-      methodDataOop mdo = method->method_data();
-      if (mdo != NULL) {
-        if (mdo->would_profile()) {
-          int mdo_i = mdo->invocation_count_delta();
-          int mdo_b = mdo->backedge_count_delta();
-          if ((this->*p)(mdo_i, mdo_b, cur_level)) {
+      break;
+    case CompLevel_limited_profile:
+      if (is_method_profiled(method)) {
+        // Special case: we got here because this method was fully profiled in the interpreter.
+        next_level = CompLevel_full_optimization;
+      } else {
+        methodDataOop mdo = method->method_data();
+        if (mdo != NULL) {
+          if (mdo->would_profile()) {
+            if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
+                                     Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
+                                     (this->*p)(i, b, cur_level))) {
+              next_level = CompLevel_full_profile;
+            }
+          } else {
             next_level = CompLevel_full_optimization;
           }
-        } else {
-          next_level = CompLevel_full_optimization;
         }
       }
+      break;
+    case CompLevel_full_profile:
+      {
+        methodDataOop mdo = method->method_data();
+        if (mdo != NULL) {
+          if (mdo->would_profile()) {
+            int mdo_i = mdo->invocation_count_delta();
+            int mdo_b = mdo->backedge_count_delta();
+            if ((this->*p)(mdo_i, mdo_b, cur_level)) {
+              next_level = CompLevel_full_optimization;
+            }
+          } else {
+            next_level = CompLevel_full_optimization;
+          }
+        }
+      }
+      break;
     }
-    break;
   }
-  return next_level;
+  return MIN2(next_level, (CompLevel)TieredStopAtLevel);
 }
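A concrete reading of the new clamp: assuming the usual tiered numbering (CompLevel_none = 0, CompLevel_simple = 1, CompLevel_limited_profile = 2, CompLevel_full_profile = 3, CompLevel_full_optimization = 4), running with -XX:TieredStopAtLevel=1 now caps every transition at the C1-only level, so the policy never requests a profiled or C2 compilation no matter what the predicates return.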
 
 // Determine if a method should be compiled with a normal entry point at a different level.
-CompLevel AdvancedThresholdPolicy::call_event(methodOop method,  CompLevel cur_level) {
-  CompLevel osr_level = (CompLevel) method->highest_osr_comp_level();
+CompLevel AdvancedThresholdPolicy::call_event(methodOop method, CompLevel cur_level) {
+  CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(),
+                             common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true));
   CompLevel next_level = common(&AdvancedThresholdPolicy::call_predicate, method, cur_level);
 
   // If OSR method level is greater than the regular method level, the levels should be
@@ -394,21 +409,21 @@
   } else {
     next_level = MAX2(osr_level, next_level);
   }
-
   return next_level;
 }
 
 // Determine if we should do an OSR compilation of a given method.
 CompLevel AdvancedThresholdPolicy::loop_event(methodOop method, CompLevel cur_level) {
+  CompLevel next_level = common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true);
   if (cur_level == CompLevel_none) {
     // If there is a live OSR method that means that we deopted to the interpreter
     // for the transition.
-    CompLevel osr_level = (CompLevel)method->highest_osr_comp_level();
+    CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
     if (osr_level > CompLevel_none) {
       return osr_level;
     }
   }
-  return common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level);
+  return next_level;
 }
 
 // Update the rate and submit compile
@@ -418,10 +433,9 @@
   CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", THREAD);
 }
 
-
 // Handle the invocation event.
 void AdvancedThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh,
-                                                      CompLevel level, TRAPS) {
+                                                      CompLevel level, nmethod* nm, TRAPS) {
   if (should_create_mdo(mh(), level)) {
     create_mdo(mh, THREAD);
   }
@@ -436,32 +450,68 @@
 // Handle the back branch event. Notice that we can compile the method
 // with a regular entry from here.
 void AdvancedThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh,
-                                                       int bci, CompLevel level, TRAPS) {
+                                                       int bci, CompLevel level, nmethod* nm, TRAPS) {
   if (should_create_mdo(mh(), level)) {
     create_mdo(mh, THREAD);
   }
+  // Check if MDO should be created for the inlined method
+  if (should_create_mdo(imh(), level)) {
+    create_mdo(imh, THREAD);
+  }
 
-  // If the method is already compiling, quickly bail out.
-  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, bci)) {
-    // Use loop event as an opportinity to also check there's been
-    // enough calls.
-    CompLevel cur_level = comp_level(mh());
-    CompLevel next_level = call_event(mh(), cur_level);
-    CompLevel next_osr_level = loop_event(mh(), level);
-    if (next_osr_level  == CompLevel_limited_profile) {
-      next_osr_level = CompLevel_full_profile; // OSRs are supposed to be for very hot methods.
-    }
-    next_level = MAX2(next_level,
-                      next_osr_level < CompLevel_full_optimization ? next_osr_level : cur_level);
-    bool is_compiling = false;
-    if (next_level != cur_level) {
-      compile(mh, InvocationEntryBci, next_level, THREAD);
-      is_compiling = true;
+  if (is_compilation_enabled()) {
+    CompLevel next_osr_level = loop_event(imh(), level);
+    CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
+    // At the very least compile the OSR version
+    if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_osr_level != level) {
+      compile(imh, bci, next_osr_level, THREAD);
     }
 
-    // Do the OSR version
-    if (!is_compiling && next_osr_level != level) {
-      compile(mh, bci, next_osr_level, THREAD);
+    // Use loop event as an opportunity to also check if there's been
+    // enough calls.
+    CompLevel cur_level, next_level;
+    if (mh() != imh()) { // If there is an enclosing method
+      guarantee(nm != NULL, "Should have nmethod here");
+      cur_level = comp_level(mh());
+      next_level = call_event(mh(), cur_level);
+
+      if (max_osr_level == CompLevel_full_optimization) {
+        // The inlinee OSRed to full opt; we need to modify the enclosing method to avoid deopts
+        bool make_not_entrant = false;
+        if (nm->is_osr_method()) {
+          // This is an OSR method; just make it not entrant and recompile later if needed
+          make_not_entrant = true;
+        } else {
+          if (next_level != CompLevel_full_optimization) {
+            // next_level is not full opt, so we need to recompile the
+            // enclosing method without the inlinee
+            cur_level = CompLevel_none;
+            make_not_entrant = true;
+          }
+        }
+        if (make_not_entrant) {
+          if (PrintTieredEvents) {
+            int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
+            print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
+          }
+          nm->make_not_entrant();
+        }
+      }
+      if (!CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
+        // Fix up next_level if necessary to avoid deopts
+        if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
+          next_level = CompLevel_full_profile;
+        }
+        if (cur_level != next_level) {
+          compile(mh, InvocationEntryBci, next_level, THREAD);
+        }
+      }
+    } else {
+      cur_level = comp_level(imh());
+      next_level = call_event(imh(), cur_level);
+      if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_level != cur_level) {
+        compile(imh, InvocationEntryBci, next_level, THREAD);
+      }
     }
   }
 }
--- a/src/share/vm/runtime/advancedThresholdPolicy.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/advancedThresholdPolicy.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -168,7 +168,7 @@
   bool call_predicate(int i, int b, CompLevel cur_level);
   bool loop_predicate(int i, int b, CompLevel cur_level);
   // Common transition function. Given a predicate determines if a method should transition to another level.
-  CompLevel common(Predicate p, methodOop method, CompLevel cur_level);
+  CompLevel common(Predicate p, methodOop method, CompLevel cur_level, bool disable_feedback = false);
   // Transition functions.
   // call_event determines if a method should be compiled at a different
   // level with a regular invocation entry.
@@ -211,14 +211,16 @@
   virtual void submit_compile(methodHandle mh, int bci, CompLevel level, TRAPS);
   // event() from SimpleThresholdPolicy would call these.
   virtual void method_invocation_event(methodHandle method, methodHandle inlinee,
-                                       CompLevel level, TRAPS);
+                                       CompLevel level, nmethod* nm, TRAPS);
   virtual void method_back_branch_event(methodHandle method, methodHandle inlinee,
-                                        int bci, CompLevel level, TRAPS);
+                                        int bci, CompLevel level, nmethod* nm, TRAPS);
 public:
   AdvancedThresholdPolicy() : _start_time(0) { }
   // Select task is called by CompileBroker. We should return a task or NULL.
   virtual CompileTask* select_task(CompileQueue* compile_queue);
   virtual void initialize();
+  virtual bool should_not_inline(ciEnv* env, ciMethod* callee);
+
 };
 
 #endif // TIERED
--- a/src/share/vm/runtime/arguments.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/arguments.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -37,15 +37,6 @@
 #include "services/management.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/taskqueue.hpp"
-#ifdef TARGET_ARCH_x86
-# include "vm_version_x86.hpp"
-#endif
-#ifdef TARGET_ARCH_sparc
-# include "vm_version_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_zero
-# include "vm_version_zero.hpp"
-#endif
 #ifdef TARGET_OS_FAMILY_linux
 # include "os_linux.inline.hpp"
 #endif
@@ -250,6 +241,11 @@
   { "UseParallelOldGCDensePrefix",
                            JDK_Version::jdk_update(6,27), JDK_Version::jdk(8) },
   { "AllowTransitionalJSR292",       JDK_Version::jdk(7), JDK_Version::jdk(8) },
+  { "UseCompressedStrings",          JDK_Version::jdk(7), JDK_Version::jdk(8) },
+#ifdef PRODUCT
+  { "DesiredMethodLimit",
+                           JDK_Version::jdk_update(7, 2), JDK_Version::jdk(8) },
+#endif // PRODUCT
   { NULL, JDK_Version(0), JDK_Version(0) }
 };
 
@@ -1426,6 +1422,9 @@
     if (FLAG_IS_DEFAULT(MinHeapDeltaBytes)) {
       FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M);
     }
+    // For those collectors or operating systems (e.g., Windows) that do
+    // not support full UseNUMA, we map to UseNUMAInterleaving for now.
+    UseNUMAInterleaving = true;
   }
 }
 
@@ -1679,8 +1678,33 @@
           UseParallelGC || UseParallelOldGC));
 }
 
+// Check whether GC log rotation can be enabled:
+// +UseGCLogFileRotation must be set, and rotation is turned off
+// when no log file is supplied, NumberOfGCLogFiles is 0,
+// or GCLogFileSize is 0.
+void check_gclog_consistency() {
+  if (UseGCLogFileRotation) {
+    if ((Arguments::gc_log_filename() == NULL) ||
+        (NumberOfGCLogFiles == 0)  ||
+        (GCLogFileSize == 0)) {
+      jio_fprintf(defaultStream::output_stream(),
+                  "To enable GC log rotation, use -Xloggc:<filename> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=<num_of_files> -XX:GCLogFileSize=<num_of_size>\n"
+                  "where num_of_file > 0 and num_of_size > 0\n"
+                  "GC log rotation is turned off\n");
+      UseGCLogFileRotation = false;
+    }
+  }
+
+  if (UseGCLogFileRotation && GCLogFileSize < 8*K) {
+    FLAG_SET_CMDLINE(uintx, GCLogFileSize, 8*K);
+    jio_fprintf(defaultStream::output_stream(),
+                "GCLogFileSize changed to minimum 8K\n");
+  }
+}
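For illustration (file name and values are arbitrary): with the checks above, rotation only takes effect when a log file, a file count, and a file size are all supplied, e.g. -Xloggc:gc.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=8M; if any of them is missing, rotation is switched back off with the warning above, and a size below 8K is raised to 8K.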
+
 // Check consistency of GC selection
 bool Arguments::check_gc_consistency() {
+  check_gclog_consistency();
   bool status = true;
   // Ensure that the user has not selected conflicting sets
   // of collectors. [Note: this check is merely a user convenience;
@@ -2671,6 +2695,7 @@
       return JNI_ERR;
     }
   }
+
   // Change the default value for flags  which have different default values
   // when working with older JDKs.
   if (JDK_Version::current().compare_major(6) <= 0 &&
@@ -2885,6 +2910,18 @@
   }
 }
 
+// Disable options not supported in this release, with a warning if they
+// were explicitly requested on the command-line
+#define UNSUPPORTED_OPTION(opt, description)                    \
+do {                                                            \
+  if (opt) {                                                    \
+    if (FLAG_IS_CMDLINE(opt)) {                                 \
+      warning(description " is disabled in this release.");     \
+    }                                                           \
+    FLAG_SET_DEFAULT(opt, false);                               \
+  }                                                             \
+} while(0)
+
 // Parse entry point called from JNI_CreateJavaVM
 
 jint Arguments::parse(const JavaVMInitArgs* args) {
@@ -2982,6 +3019,10 @@
     return result;
   }
 
+#ifdef JAVASE_EMBEDDED
+  UNSUPPORTED_OPTION(UseG1GC, "G1 GC");
+#endif
+
 #ifndef PRODUCT
   if (TraceBytecodesAt != 0) {
     TraceBytecodes = true;
--- a/src/share/vm/runtime/atomic.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/atomic.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -51,6 +51,12 @@
 #ifdef TARGET_OS_ARCH_windows_x86
 # include "atomic_windows_x86.inline.hpp"
 #endif
+#ifdef TARGET_OS_ARCH_linux_arm
+# include "atomic_linux_arm.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_linux_ppc
+# include "atomic_linux_ppc.inline.hpp"
+#endif
 
 jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
   assert(sizeof(jbyte) == 1, "assumption.");
@@ -83,3 +89,13 @@
   return (unsigned int)Atomic::cmpxchg((jint)exchange_value, (volatile jint*)dest,
                                        (jint)compare_value);
 }
+
+jlong Atomic::add(jlong    add_value, volatile jlong*    dest) {
+  jlong old = load(dest);
+  jlong new_value = old + add_value;
+  while (old != cmpxchg(new_value, dest, old)) {
+    old = load(dest);
+    new_value = old + add_value;
+  }
+  return old;
+}
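A brief usage sketch of the new 64-bit Atomic::add (the counter and caller are hypothetical; as written above, the function retries via cmpxchg until its update wins and returns the value it observed before the addition):

    static volatile jlong _allocated_bytes = 0;

    void note_allocation(size_t bytes) {
      jlong before = Atomic::add((jlong) bytes, &_allocated_bytes);
      // 'before' is the counter value prior to this thread's contribution.
    }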
--- a/src/share/vm/runtime/atomic.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/atomic.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -51,6 +51,8 @@
   static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest);
   static void*    add_ptr(intptr_t add_value, volatile void*     dest);
 
+  static jlong    add    (jlong    add_value, volatile jlong*    dest);
+
   // Atomically increment location
   static void inc    (volatile jint*     dest);
   static void inc_ptr(volatile intptr_t* dest);
--- a/src/share/vm/runtime/compilationPolicy.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/compilationPolicy.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -306,7 +306,7 @@
   return (current >= initial + target);
 }
 
-nmethod* NonTieredCompPolicy::event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, TRAPS) {
+nmethod* NonTieredCompPolicy::event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS) {
   assert(comp_level == CompLevel_none, "This should be only called from the interpreter");
   NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci));
   if (JvmtiExport::can_post_interpreter_events()) {
--- a/src/share/vm/runtime/compilationPolicy.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/compilationPolicy.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -59,10 +59,12 @@
   // Profiling
   elapsedTimer* accumulated_time() { return &_accumulated_time; }
   void print_time() PRODUCT_RETURN;
+  // Return the initial compile level used with -Xcomp
+  virtual CompLevel initial_compile_level() = 0;
   virtual int compiler_count(CompLevel comp_level) = 0;
   // main notification entry, return a pointer to an nmethod if the OSR is required,
   // returns NULL otherwise.
-  virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, TRAPS) = 0;
+  virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS) = 0;
   // safepoint() is called at the end of the safepoint
   virtual void do_safepoint_work() = 0;
   // reprofile request
@@ -80,6 +82,7 @@
   virtual bool is_mature(methodOop method) = 0;
   // Do policy initialization
   virtual void initialize() = 0;
+  virtual bool should_not_inline(ciEnv* env, ciMethod* method) { return false; }
 };
 
 // A base class for baseline policies.
@@ -93,6 +96,7 @@
   void reset_counter_for_back_branch_event(methodHandle method);
 public:
   NonTieredCompPolicy() : _compiler_count(0) { }
+  virtual CompLevel initial_compile_level() { return CompLevel_initial_compile; }
   virtual int compiler_count(CompLevel comp_level);
   virtual void do_safepoint_work();
   virtual void reprofile(ScopeDesc* trap_scope, bool is_osr);
@@ -101,7 +105,7 @@
   virtual bool is_mature(methodOop method);
   virtual void initialize();
   virtual CompileTask* select_task(CompileQueue* compile_queue);
-  virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, TRAPS);
+  virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS);
   virtual void method_invocation_event(methodHandle m, TRAPS) = 0;
   virtual void method_back_branch_event(methodHandle m, int bci, TRAPS) = 0;
 };
--- a/src/share/vm/runtime/deoptimization.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/deoptimization.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -103,7 +103,7 @@
   _frame_pcs                 = frame_pcs;
   _register_block            = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2);
   _return_type               = return_type;
-  _initial_fp                = 0;
+  _initial_info              = 0;
   // PD (x86 only)
   _counter_temp              = 0;
   _unpack_kind               = 0;
@@ -486,9 +486,10 @@
                                       frame_sizes,
                                       frame_pcs,
                                       return_type);
-  // On some platforms, we need a way to pass fp to the unpacking code
-  // so the skeletal frames come out correct.
-  info->set_initial_fp((intptr_t) array->sender().fp());
+  // On some platforms, we need a way to pass some platform dependent
+  // information to the unpacking code so the skeletal frames come out
+  // correct (initial fp value, unextended sp, ...)
+  info->set_initial_info((intptr_t) array->sender().initial_deoptimization_info());
 
   if (array->frames() > 1) {
     if (VerifyStack && TraceDeoptimization) {
--- a/src/share/vm/runtime/deoptimization.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/deoptimization.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -34,6 +34,8 @@
 class ObjectValue;
 
 class Deoptimization : AllStatic {
+  friend class VMStructs;
+
  public:
   // What condition caused the deoptimization?
   enum DeoptReason {
@@ -137,7 +139,7 @@
     address*  _frame_pcs;                 // Array of frame pc's, in bytes, for unrolling the stack
     intptr_t* _register_block;            // Block for storing callee-saved registers.
     BasicType _return_type;               // Tells if we have to restore double or long return value
-    intptr_t  _initial_fp;                // FP of the sender frame
+    intptr_t  _initial_info;              // Platform dependent data for the sender frame (was FP on x86)
     int       _caller_actual_parameters;  // The number of actual arguments at the
                                           // interpreted caller of the deoptimized frame
 
@@ -170,7 +172,7 @@
     // Returns the total size of frames
     int size_of_frames() const;
 
-    void set_initial_fp(intptr_t fp) { _initial_fp = fp; }
+    void set_initial_info(intptr_t info) { _initial_info = info; }
 
     int caller_actual_parameters() const { return _caller_actual_parameters; }
 
@@ -184,7 +186,7 @@
     static int register_block_offset_in_bytes()            { return offset_of(UnrollBlock, _register_block);            }
     static int return_type_offset_in_bytes()               { return offset_of(UnrollBlock, _return_type);               }
     static int counter_temp_offset_in_bytes()              { return offset_of(UnrollBlock, _counter_temp);              }
-    static int initial_fp_offset_in_bytes()                { return offset_of(UnrollBlock, _initial_fp);                }
+    static int initial_info_offset_in_bytes()              { return offset_of(UnrollBlock, _initial_info);              }
     static int unpack_kind_offset_in_bytes()               { return offset_of(UnrollBlock, _unpack_kind);               }
     static int sender_sp_temp_offset_in_bytes()            { return offset_of(UnrollBlock, _sender_sp_temp);            }
 
--- a/src/share/vm/runtime/fieldDescriptor.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/fieldDescriptor.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -42,59 +42,51 @@
   objArrayOop md = ik->fields_annotations();
   if (md == NULL)
     return NULL;
-  assert((index() % instanceKlass::next_offset) == 0, "");
-  return typeArrayOop(md->obj_at(index() / instanceKlass::next_offset));
+  return typeArrayOop(md->obj_at(index()));
 }
 
 constantTag fieldDescriptor::initial_value_tag() const {
-  return constants()->tag_at(_initial_value_index);
+  return constants()->tag_at(initial_value_index());
 }
 
 jint fieldDescriptor::int_initial_value() const {
-  return constants()->int_at(_initial_value_index);
+  return constants()->int_at(initial_value_index());
 }
 
 jlong fieldDescriptor::long_initial_value() const {
-  return constants()->long_at(_initial_value_index);
+  return constants()->long_at(initial_value_index());
 }
 
 jfloat fieldDescriptor::float_initial_value() const {
-  return constants()->float_at(_initial_value_index);
+  return constants()->float_at(initial_value_index());
 }
 
 jdouble fieldDescriptor::double_initial_value() const {
-  return constants()->double_at(_initial_value_index);
+  return constants()->double_at(initial_value_index());
 }
 
 oop fieldDescriptor::string_initial_value(TRAPS) const {
-  return constants()->string_at(_initial_value_index, CHECK_0);
+  return constants()->string_at(initial_value_index(), CHECK_0);
 }
 
 void fieldDescriptor::initialize(klassOop k, int index) {
   instanceKlass* ik = instanceKlass::cast(k);
   _cp = ik->constants();
-  typeArrayOop fields = ik->fields();
-
-  assert(fields->length() % instanceKlass::next_offset == 0, "Illegal size of field array");
-  assert(fields->length() >= index + instanceKlass::next_offset, "Illegal size of field array");
+  FieldInfo* f = ik->field(index);
+  assert(!f->is_internal(), "regular Java fields only");
 
-  _access_flags.set_field_flags(fields->ushort_at(index + instanceKlass::access_flags_offset));
-  _name_index = fields->ushort_at(index + instanceKlass::name_index_offset);
-  _signature_index = fields->ushort_at(index + instanceKlass::signature_index_offset);
-  _initial_value_index = fields->ushort_at(index + instanceKlass::initval_index_offset);
-  guarantee(_name_index != 0 && _signature_index != 0, "bad constant pool index for fieldDescriptor");
-  _offset = ik->offset_from_fields( index );
-  _generic_signature_index = fields->ushort_at(index + instanceKlass::generic_signature_offset);
+  _access_flags = accessFlags_from(f->access_flags());
+  guarantee(f->name_index() != 0 && f->signature_index() != 0, "bad constant pool index for fieldDescriptor");
   _index = index;
 }
 
 #ifndef PRODUCT
 
 void fieldDescriptor::print_on(outputStream* st) const {
-  _access_flags.print_on(st);
-  constants()->symbol_at(_name_index)->print_value_on(st);
+  access_flags().print_on(st);
+  name()->print_value_on(st);
   st->print(" ");
-  constants()->symbol_at(_signature_index)->print_value_on(st);
+  signature()->print_value_on(st);
   st->print(" @%d ", offset());
   if (WizardMode && has_initial_value()) {
     st->print("(initval ");
--- a/src/share/vm/runtime/fieldDescriptor.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/fieldDescriptor.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -40,29 +40,40 @@
 class fieldDescriptor VALUE_OBJ_CLASS_SPEC {
  private:
   AccessFlags         _access_flags;
-  int                 _name_index;
-  int                 _signature_index;
-  int                 _initial_value_index;
-  int                 _offset;
-  int                 _generic_signature_index;
-  int                 _index; // index into fields() array
+  int                 _index; // the field index
   constantPoolHandle  _cp;
 
+  // update the access_flags for the field in the klass
+  void update_klass_field_access_flag() {
+    instanceKlass* ik = instanceKlass::cast(field_holder());
+    ik->field(index())->set_access_flags(_access_flags.as_short());
+  }
+
+  FieldInfo* field() const {
+    instanceKlass* ik = instanceKlass::cast(field_holder());
+    return ik->field(_index);
+  }
+
  public:
-  Symbol* name() const                 { return _cp->symbol_at(_name_index); }
-  Symbol* signature() const            { return _cp->symbol_at(_signature_index); }
+  Symbol* name() const {
+    return field()->name(_cp);
+  }
+  Symbol* signature() const {
+    return field()->signature(_cp);
+  }
   klassOop field_holder() const        { return _cp->pool_holder(); }
   constantPoolOop constants() const    { return _cp(); }
   AccessFlags access_flags() const     { return _access_flags; }
   oop loader() const;
   // Offset (in words) of field from start of instanceOop / klassOop
-  int offset() const                   { return _offset; }
-  Symbol* generic_signature() const    { return (_generic_signature_index > 0 ? _cp->symbol_at(_generic_signature_index) : (Symbol*)NULL); }
+  int offset() const                   { return field()->offset(); }
+  Symbol* generic_signature() const    { return field()->generic_signature(_cp); }
   int index() const                    { return _index; }
   typeArrayOop annotations() const;
 
   // Initial field value
-  bool has_initial_value() const          { return _initial_value_index != 0; }
+  bool has_initial_value() const          { return field()->initval_index() != 0; }
+  int initial_value_index() const         { return field()->initval_index(); }
   constantTag initial_value_tag() const;  // The tag will return true on one of is_int(), is_long(), is_single(), is_double()
   jint        int_initial_value() const;
   jlong       long_initial_value() const;
@@ -74,25 +85,31 @@
   BasicType field_type() const            { return FieldType::basic_type(signature()); }
 
   // Access flags
-  bool is_public() const                  { return _access_flags.is_public(); }
-  bool is_private() const                 { return _access_flags.is_private(); }
-  bool is_protected() const               { return _access_flags.is_protected(); }
+  bool is_public() const                  { return access_flags().is_public(); }
+  bool is_private() const                 { return access_flags().is_private(); }
+  bool is_protected() const               { return access_flags().is_protected(); }
   bool is_package_private() const         { return !is_public() && !is_private() && !is_protected(); }
 
-  bool is_static() const                  { return _access_flags.is_static(); }
-  bool is_final() const                   { return _access_flags.is_final(); }
-  bool is_volatile() const                { return _access_flags.is_volatile(); }
-  bool is_transient() const               { return _access_flags.is_transient(); }
+  bool is_static() const                  { return access_flags().is_static(); }
+  bool is_final() const                   { return access_flags().is_final(); }
+  bool is_volatile() const                { return access_flags().is_volatile(); }
+  bool is_transient() const               { return access_flags().is_transient(); }
 
-  bool is_synthetic() const               { return _access_flags.is_synthetic(); }
+  bool is_synthetic() const               { return access_flags().is_synthetic(); }
 
-  bool is_field_access_watched() const    { return _access_flags.is_field_access_watched(); }
+  bool is_field_access_watched() const    { return access_flags().is_field_access_watched(); }
   bool is_field_modification_watched() const
-                                          { return _access_flags.is_field_modification_watched(); }
-  void set_is_field_access_watched(const bool value)
-                                          { _access_flags.set_is_field_access_watched(value); }
-  void set_is_field_modification_watched(const bool value)
-                                          { _access_flags.set_is_field_modification_watched(value); }
+                                          { return access_flags().is_field_modification_watched(); }
+
+  void set_is_field_access_watched(const bool value) {
+    _access_flags.set_is_field_access_watched(value);
+    update_klass_field_access_flag();
+  }
+
+  void set_is_field_modification_watched(const bool value) {
+    _access_flags.set_is_field_modification_watched(value);
+    update_klass_field_access_flag();
+  }
 
   // Initialization
   void initialize(klassOop k, int index);
--- a/src/share/vm/runtime/frame.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/frame.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -221,6 +221,10 @@
   // returns the stack pointer of the calling frame
   intptr_t* sender_sp() const;
 
+  // Deoptimization info, if needed (platform dependent).
+  // Stored in the initial_info field of the unroll info, to be used by
+  // the platform dependent deoptimization blobs.
+  intptr_t *initial_deoptimization_info();
 
   // Interpreter frames:
 
--- a/src/share/vm/runtime/globals.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/globals.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -343,6 +343,12 @@
 #define falseInTiered true
 #endif
 
+#ifdef JAVASE_EMBEDDED
+#define falseInEmbedded false
+#else
+#define falseInEmbedded true
+#endif
+
 // develop flags are settable / visible only during development and are constant in the PRODUCT version
 // product flags are always settable / visible
 // notproduct flags are settable / visible only during development and are not declared in the PRODUCT version
@@ -438,6 +444,9 @@
   product(bool, UsePPCLWSYNC, true,                                         \
           "Use lwsync instruction if true, else use slower sync")           \
                                                                             \
+  develop(bool, CleanChunkPoolAsync, falseInEmbedded,                       \
+          "Whether to clean the chunk pool asynchronously")                 \
+                                                                            \
   /* Temporary: See 6948537 */                                             \
   experimental(bool, UseMemSetInBOT, true,                                  \
           "(Unstable) uses memset in BOT updates in GC code")               \
@@ -466,6 +475,12 @@
   product(bool, UseNUMA, false,                                             \
           "Use NUMA if available")                                          \
                                                                             \
+  product(bool, UseNUMAInterleaving, false,                                 \
+          "Interleave memory across NUMA nodes if available")               \
+                                                                            \
+  product(uintx, NUMAInterleaveGranularity, 2*M,                            \
+          "Granularity to use for NUMA interleaving on Windows OS")         \
+                                                                            \
   product(bool, ForceNUMA, false,                                           \
           "Force NUMA optimizations on single-node/UMA systems")            \
                                                                             \
@@ -492,6 +507,9 @@
   product(intx, UseSSE, 99,                                                 \
           "Highest supported SSE instructions set on x86/x64")              \
                                                                             \
+  product(intx, UseVIS, 99,                                                 \
+          "Highest supported VIS instructions set on Sparc")                \
+                                                                            \
   product(uintx, LargePageSizeInBytes, 0,                                   \
           "Large page size (0 to let VM choose the page size")              \
                                                                             \
@@ -1193,6 +1211,9 @@
   product(bool, UseUnalignedLoadStores, false,                              \
           "Use SSE2 MOVDQU instruction for Arraycopy")                      \
                                                                             \
+  product(bool, UseCBCond, false,                                           \
+          "Use compare and branch instruction on SPARC")                    \
+                                                                            \
   product(intx, FieldsAllocationStyle, 1,                                   \
           "0 - type based with oops first, 1 - with oops last, "            \
           "2 - oops in super and sub classes are together")                 \
@@ -1944,6 +1965,9 @@
           "Number of ObjArray elements to push onto the marking stack"      \
           "before pushing a continuation entry")                            \
                                                                             \
+  notproduct(bool, ExecuteInternalVMTests, false,                           \
+          "Enable execution of internal VM tests.")                         \
+                                                                            \
   product_pd(bool, UseTLAB, "Use thread-local object allocation")           \
                                                                             \
   product_pd(bool, ResizeTLAB,                                              \
@@ -1961,6 +1985,18 @@
   product(bool, TLABStats, true,                                            \
           "Print various TLAB related information")                         \
                                                                             \
+  product(bool, UseBlockZeroing, false,                                     \
+          "Use special cpu instructions for block zeroing")                 \
+                                                                            \
+  product(intx, BlockZeroingLowLimit, 2048,                                 \
+          "Minimum size in bytes when block zeroing will be used")          \
+                                                                            \
+  product(bool, UseBlockCopy, false,                                        \
+          "Use special cpu instructions for block copy")                    \
+                                                                            \
+  product(intx, BlockCopyLowLimit, 2048,                                    \
+          "Minimum size in bytes when block copy will be used")             \
+                                                                            \
   product(bool, PrintRevisitStats, false,                                   \
           "Print revisit (klass and MDO) stack related information")        \
                                                                             \
@@ -2332,6 +2368,20 @@
           "Print diagnostic message when GC is stalled"                     \
           "by JNI critical section")                                        \
                                                                             \
+  /* GC log rotation setting */                                             \
+                                                                            \
+  product(bool, UseGCLogFileRotation, false,                                \
+          "Prevent large gclog file for long running app. "                 \
+          "Requires -Xloggc:<filename>")                                    \
+                                                                            \
+  product(uintx, NumberOfGCLogFiles, 0,                                     \
+          "Number of gclog files in rotation, "                             \
+          "Default: 0, no rotation")                                        \
+                                                                            \
+  product(uintx, GCLogFileSize, 0,                                          \
+          "GC log file size, Default: 0 bytes, no rotation "                \
+          "Only valid with UseGCLogFileRotation")                           \
+                                                                            \
   /* JVMTI heap profiling */                                                \
                                                                             \
   diagnostic(bool, TraceJVMTIObjectTagging, false,                          \
@@ -2865,8 +2915,11 @@
   product(intx,  AllocatePrefetchDistance, -1,                              \
           "Distance to prefetch ahead of allocation pointer")               \
                                                                             \
-  product(intx,  AllocatePrefetchLines, 1,                                  \
-          "Number of lines to prefetch ahead of allocation pointer")        \
+  product(intx,  AllocatePrefetchLines, 3,                                  \
+          "Number of lines to prefetch ahead of array allocation pointer")  \
+                                                                            \
+  product(intx,  AllocateInstancePrefetchLines, 1,                          \
+          "Number of lines to prefetch ahead of instance allocation pointer") \
                                                                             \
   product(intx,  AllocatePrefetchStepSize, 16,                              \
           "Step size in bytes of sequential prefetch instructions")         \
@@ -2877,6 +2930,12 @@
   product(intx,  ReadPrefetchInstr, 0,                                      \
           "Prefetch instruction to prefetch ahead")                         \
                                                                             \
+  product(uintx,  ArraycopySrcPrefetchDistance, 0,                          \
+          "Distance to prefetch source array in arracopy")                  \
+                                                                            \
+  product(uintx,  ArraycopyDstPrefetchDistance, 0,                          \
+          "Distance to prefetch destination array in arracopy")             \
+                                                                            \
   /* deoptimization */                                                      \
   develop(bool, TraceDeoptimization, false,                                 \
           "Trace deoptimization")                                           \
@@ -3594,13 +3653,9 @@
                                                                             \
   /* flags for performance data collection */                               \
                                                                             \
-  NOT_EMBEDDED(product(bool, UsePerfData, true,                             \
+  product(bool, UsePerfData, falseInEmbedded,                               \
           "Flag to disable jvmstat instrumentation for performance testing" \
-          "and problem isolation purposes."))                               \
-                                                                            \
-  EMBEDDED_ONLY(product(bool, UsePerfData, false,                           \
-          "Flag to disable jvmstat instrumentation for performance testing" \
-          "and problem isolation purposes."))                               \
+          "and problem isolation purposes.")                                \
                                                                             \
   product(bool, PerfDataSaveToFile, false,                                  \
           "Save PerfData memory to hsperfdata_<pid> file on exit")          \
--- a/src/share/vm/runtime/java.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/java.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -243,6 +243,7 @@
     FlagSetting fs(DisplayVMOutput, DisplayVMOutput && PrintC1Statistics);
     Runtime1::print_statistics();
     Deoptimization::print_statistics();
+    SharedRuntime::print_statistics();
     nmethod::print_statistics();
   }
 #endif /* COMPILER1 */
@@ -254,8 +255,8 @@
 #ifndef COMPILER1
     Deoptimization::print_statistics();
     nmethod::print_statistics();
+    SharedRuntime::print_statistics();
 #endif //COMPILER1
-    SharedRuntime::print_statistics();
     os::print_statistics();
   }
 
@@ -468,12 +469,10 @@
   StatSampler::disengage();
   StatSampler::destroy();
 
-#ifndef SERIALGC
-  // stop CMS threads
-  if (UseConcMarkSweepGC) {
-    ConcurrentMarkSweepThread::stop();
-  }
-#endif // SERIALGC
+  // We do not need to explicitly stop concurrent GC threads because the
+  // JVM will be taken down at a safepoint when such threads are inactive --
+  // except for some concurrent G1 threads, see (comment in)
+  // Threads::destroy_vm().
 
   // Print GC/heap related information.
   if (PrintGCDetails) {
@@ -673,7 +672,8 @@
     _current = JDK_Version(major, minor, micro, info.update_version,
                            info.special_update_version, build,
                            info.thread_park_blocker == 1,
-                           info.post_vm_init_hook_enabled == 1);
+                           info.post_vm_init_hook_enabled == 1,
+                           info.pending_list_uses_discovered_field == 1);
   }
 }
 
--- a/src/share/vm/runtime/java.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/java.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -92,6 +92,7 @@
   bool _partially_initialized;
 
   bool _thread_park_blocker;
+  bool _pending_list_uses_discovered_field;
   bool _post_vm_init_hook_enabled;
 
   bool is_valid() const {
@@ -114,15 +115,18 @@
 
   JDK_Version() : _major(0), _minor(0), _micro(0), _update(0),
                   _special(0), _build(0), _partially_initialized(false),
-                  _thread_park_blocker(false), _post_vm_init_hook_enabled(false) {}
+                  _thread_park_blocker(false), _post_vm_init_hook_enabled(false),
+                  _pending_list_uses_discovered_field(false) {}
 
   JDK_Version(uint8_t major, uint8_t minor = 0, uint8_t micro = 0,
               uint8_t update = 0, uint8_t special = 0, uint8_t build = 0,
-              bool thread_park_blocker = false, bool post_vm_init_hook_enabled = false) :
+              bool thread_park_blocker = false, bool post_vm_init_hook_enabled = false,
+              bool pending_list_uses_discovered_field = false) :
       _major(major), _minor(minor), _micro(micro), _update(update),
       _special(special), _build(build), _partially_initialized(false),
       _thread_park_blocker(thread_park_blocker),
-      _post_vm_init_hook_enabled(post_vm_init_hook_enabled) {}
+      _post_vm_init_hook_enabled(post_vm_init_hook_enabled),
+      _pending_list_uses_discovered_field(pending_list_uses_discovered_field) {}
 
   // Returns the current running JDK version
   static JDK_Version current() { return _current; }
@@ -149,6 +153,10 @@
   bool post_vm_init_hook_enabled() const {
     return _post_vm_init_hook_enabled;
   }
+  // For compatibility with pre-4965777 JDKs
+  bool pending_list_uses_discovered_field() const {
+    return _pending_list_uses_discovered_field;
+  }
 
   // Performs a full ordering comparison using all fields (update, build, etc.)
   int compare(const JDK_Version& other) const;
--- a/src/share/vm/runtime/javaCalls.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/javaCalls.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -355,7 +355,7 @@
   assert(!thread->is_Compiler_thread(), "cannot compile from the compiler");
   if (CompilationPolicy::must_be_compiled(method)) {
     CompileBroker::compile_method(method, InvocationEntryBci,
-                                  CompLevel_initial_compile,
+                                  CompilationPolicy::policy()->initial_compile_level(),
                                   methodHandle(), 0, "must_be_compiled", CHECK);
   }
 
--- a/src/share/vm/runtime/os.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/os.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -761,6 +761,7 @@
   // st->print("(active %d)", os::active_processor_count());
   st->print(" %s", VM_Version::cpu_features());
   st->cr();
+  pd_print_cpu_info(st);
 }
 
 void os::print_date_and_time(outputStream *st) {
@@ -1234,6 +1235,17 @@
 }
 
 #ifndef PRODUCT
+void os::trace_page_sizes(const char* str, const size_t* page_sizes, int count)
+{
+  if (TracePageSizes) {
+    tty->print("%s: ", str);
+    for (int i = 0; i < count; ++i) {
+      tty->print(" " SIZE_FORMAT, page_sizes[i]);
+    }
+    tty->cr();
+  }
+}
+
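A minimal usage sketch of the new array-based overload (label and sizes are illustrative; K and M are the usual HotSpot size constants):

    const size_t sizes[] = { 4*K, 2*M, 256*M };
    os::trace_page_sizes("heap", sizes, 3);  // prints "heap:  4096 2097152 268435456" when TracePageSizes is on; no-op in product builds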
 void os::trace_page_sizes(const char* str, const size_t region_min_size,
                           const size_t region_max_size, const size_t page_size,
                           const char* base, const size_t size)
--- a/src/share/vm/runtime/os.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/os.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -208,11 +208,13 @@
                                      size_t region_max_size,
                                      uint min_pages);
 
-  // Method for tracing page sizes returned by the above method; enabled by
+  // Methods for tracing page sizes returned by the above method; enabled by
   // TracePageSizes.  The region_{min,max}_size parameters should be the values
   // passed to page_size_for_region() and page_size should be the result of that
   // call.  The (optional) base and size parameters should come from the
   // ReservedSpace base() and size() methods.
+  static void trace_page_sizes(const char* str, const size_t* page_sizes,
+                               int count) PRODUCT_RETURN;
   static void trace_page_sizes(const char* str, const size_t region_min_size,
                                const size_t region_max_size,
                                const size_t page_size,
@@ -480,6 +482,7 @@
   // Output format may be different on different platforms.
   static void print_os_info(outputStream* st);
   static void print_cpu_info(outputStream* st);
+  static void pd_print_cpu_info(outputStream* st);
   static void print_memory_info(outputStream* st);
   static void print_dll_info(outputStream* st);
   static void print_environment_variables(outputStream* st, const char** env_list, char* buffer, int len);
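A minimal, illustrative use of the new array-based trace_page_sizes overload; the label and page-size values here are assumptions, not taken from the changeset:

    const size_t sizes[] = { (size_t)os::vm_page_size(), os::large_page_size() };
    os::trace_page_sizes("heap", sizes, 2);   // prints only when -XX:+TracePageSizes is set
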
--- a/src/share/vm/runtime/reflection.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/reflection.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -844,16 +844,6 @@
 }
 
 
-//---------------------------------------------------------------------------
-//
-// Supporting routines for old native code-based reflection (pre-JDK 1.4).
-//
-// See reflection.hpp for details.
-//
-//---------------------------------------------------------------------------
-
-#ifdef SUPPORT_OLD_REFLECTION
-
 methodHandle Reflection::resolve_interface_call(instanceKlassHandle klass, methodHandle method,
                                                 KlassHandle recv_klass, Handle receiver, TRAPS) {
   assert(!method.is_null() , "method should not be null");
@@ -1081,519 +1071,6 @@
   return java_lang_Class::primitive_type(basic_type_mirror);
 }
 
-
-bool Reflection::match_parameter_types(methodHandle method, objArrayHandle types, int parameter_count, TRAPS) {
-  int types_len = types.is_null() ? 0 : types->length();
-  if (types_len != parameter_count) return false;
-  if (parameter_count > 0) {
-    objArrayHandle method_types = get_parameter_types(method, parameter_count, NULL, CHECK_false);
-    for (int index = 0; index < parameter_count; index++) {
-      if (types->obj_at(index) != method_types->obj_at(index)) {
-        return false;
-      }
-    }
-  }
-  return true;
-}
-
-
-oop Reflection::new_field(FieldStream* st, TRAPS) {
-  Symbol*  field_name = st->name();
-  Handle name = java_lang_String::create_from_symbol(field_name, CHECK_NULL);
-  Symbol*  signature = st->signature();
-  Handle type = new_type(signature, st->klass(), CHECK_NULL);
-  Handle rh  = java_lang_reflect_Field::create(CHECK_NULL);
-  oop result = rh();
-
-  java_lang_reflect_Field::set_clazz(result, st->klass()->java_mirror());
-  java_lang_reflect_Field::set_slot(result, st->index());
-  java_lang_reflect_Field::set_name(result, name());
-  java_lang_reflect_Field::set_type(result, type());
-  // Note the ACC_ANNOTATION bit, which is a per-class access flag, is never set here.
-  java_lang_reflect_Field::set_modifiers(result, st->access_flags().as_int() & JVM_RECOGNIZED_FIELD_MODIFIERS);
-  java_lang_reflect_Field::set_override(result, false);
-  return result;
-}
-
-
-bool Reflection::resolve_field(Handle field_mirror, Handle& receiver, fieldDescriptor* fd, bool check_final, TRAPS) {
-  if (field_mirror.is_null()) {
-    THROW_(vmSymbols::java_lang_NullPointerException(), false);
-  }
-
-  instanceKlassHandle klass (THREAD, java_lang_Class::as_klassOop(java_lang_reflect_Field::clazz(field_mirror())));
-  int                 slot  = java_lang_reflect_Field::slot(field_mirror());
-
-  // Ensure klass is initialized
-  klass->initialize(CHECK_false);
-  fd->initialize(klass(), slot);
-
-  bool is_static = fd->is_static();
-  KlassHandle receiver_klass;
-
-  if (is_static) {
-    receiver = KlassHandle(THREAD, klass());
-    receiver_klass = klass;
-  } else {
-    // Check object is a non-null instance of declaring class
-    if (receiver.is_null()) {
-      THROW_(vmSymbols::java_lang_NullPointerException(), false);
-    }
-    if (!receiver->is_a(klass())) {
-      THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(), "object is not an instance of declaring class", false);
-    }
-    receiver_klass = KlassHandle(THREAD, receiver->klass());
-  }
-
-  // Access checking (unless overridden by Field)
-  if (!java_lang_reflect_Field::override(field_mirror())) {
-    if (!(klass->is_public() && fd->is_public())) {
-      bool access_check = reflect_check_access(klass(), fd->access_flags(), receiver_klass(), false, CHECK_false);
-      if (!access_check) {
-        return false; // exception
-      }
-    }
-  }
-
-  if (check_final && fd->is_final()) {
-    // In 1.3 we always throw an error when attempting to set a final field.
-    // In 1.2.x, this was allowed in the override bit was set by calling Field.setAccessible(true).
-    // We currently maintain backwards compatibility. See bug 4250960.
-    bool strict_final_check = !JDK_Version::is_jdk12x_version();
-    if (strict_final_check || !java_lang_reflect_Field::override(field_mirror())) {
-      THROW_MSG_(vmSymbols::java_lang_IllegalAccessException(), "field is final", false);
-    }
-  }
-  return true;
-}
-
-
-BasicType Reflection::field_get(jvalue* value, fieldDescriptor* fd, Handle receiver)  {
-  BasicType field_type = fd->field_type();
-  int offset = fd->offset();
-  switch (field_type) {
-    case T_BOOLEAN:
-      value->z = receiver->bool_field(offset);
-      break;
-    case T_CHAR:
-      value->c = receiver->char_field(offset);
-      break;
-    case T_FLOAT:
-      value->f = receiver->float_field(offset);
-      break;
-    case T_DOUBLE:
-      value->d = receiver->double_field(offset);
-      break;
-    case T_BYTE:
-      value->b = receiver->byte_field(offset);
-      break;
-    case T_SHORT:
-      value->s = receiver->short_field(offset);
-      break;
-    case T_INT:
-      value->i = receiver->int_field(offset);
-      break;
-    case T_LONG:
-      value->j = receiver->long_field(offset);
-      break;
-    case T_OBJECT:
-    case T_ARRAY:
-      value->l = (jobject) receiver->obj_field(offset);
-      break;
-    default:
-      return T_ILLEGAL;
-  }
-  return field_type;
-}
-
-
-void Reflection::field_set(jvalue* value, fieldDescriptor* fd, Handle receiver, BasicType value_type, TRAPS) {
-  BasicType field_type = fd->field_type();
-  if (field_type != value_type) {
-    widen(value, value_type, field_type, CHECK);
-  }
-
-  int offset = fd->offset();
-  switch (field_type) {
-    case T_BOOLEAN:
-      receiver->bool_field_put(offset, value->z);
-      break;
-    case T_CHAR:
-      receiver->char_field_put(offset, value->c);
-      break;
-    case T_FLOAT:
-      receiver->float_field_put(offset, value->f);
-      break;
-    case T_DOUBLE:
-      receiver->double_field_put(offset, value->d);
-      break;
-    case T_BYTE:
-      receiver->byte_field_put(offset, value->b);
-      break;
-    case T_SHORT:
-      receiver->short_field_put(offset, value->s);
-      break;
-    case T_INT:
-      receiver->int_field_put(offset, value->i);
-      break;
-    case T_LONG:
-      receiver->long_field_put(offset, value->j);
-      break;
-    case T_OBJECT:
-    case T_ARRAY: {
-      Handle obj(THREAD, (oop) value->l);
-      if (obj.not_null()) {
-        Symbol*  signature = fd->signature();
-        Handle       loader   (THREAD, fd->loader());
-        Handle       protect  (THREAD, Klass::cast(fd->field_holder())->protection_domain());
-        klassOop k = SystemDictionary::resolve_or_fail(signature, loader, protect, true, CHECK); // may block
-        if (!obj->is_a(k)) {
-          THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "field type mismatch");
-        }
-      }
-      receiver->obj_field_put(offset, obj());
-      break;
-    }
-    default:
-      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "field type mismatch");
-  }
-}
-
-
-oop Reflection::reflect_field(oop mirror, Symbol* field_name, jint which, TRAPS) {
-  // Exclude primitive types and array types
-  if (java_lang_Class::is_primitive(mirror))                             return NULL;
-  if (Klass::cast(java_lang_Class::as_klassOop(mirror))->oop_is_array()) return NULL;
-
-  instanceKlassHandle k(THREAD, java_lang_Class::as_klassOop(mirror));
-  bool local_fields_only = (which == DECLARED);
-
-  // Ensure class is linked
-  k->link_class(CHECK_NULL);
-
-  // Search class and interface fields
-  for (FieldStream st(k, local_fields_only, false); !st.eos(); st.next()) {
-    if (st.name() == field_name) {
-      if (local_fields_only || st.access_flags().is_public()) {
-        return new_field(&st, THREAD);
-      }
-    }
-  }
-
-  return NULL;
-}
-
-
-objArrayOop Reflection::reflect_fields(oop mirror, jint which, TRAPS) {
-  // Exclude primitive types and array types
-  if (java_lang_Class::is_primitive(mirror)
-      || Klass::cast(java_lang_Class::as_klassOop(mirror))->oop_is_array()) {
-    Symbol* name = vmSymbols::java_lang_reflect_Field();
-    klassOop klass = SystemDictionary::resolve_or_fail(name, true, CHECK_NULL);
-    return oopFactory::new_objArray(klass, 0, CHECK_NULL);  // Return empty array
-  }
-
-  instanceKlassHandle k(THREAD, java_lang_Class::as_klassOop(mirror));
-
-  // Ensure class is linked
-  k->link_class(CHECK_NULL);
-
-  bool local_fields_only = (which == DECLARED);
-  int count = 0;
-  { // Compute fields count for class and interface fields
-    for (FieldStream st(k, local_fields_only, false); !st.eos(); st.next()) {
-      if (local_fields_only || st.access_flags().is_public()) {
-        count++;
-      }
-    }
-  }
-
-  // Allocate result
-  Symbol* name = vmSymbols::java_lang_reflect_Field();
-  klassOop klass = SystemDictionary::resolve_or_fail(name, true, CHECK_NULL);
-  objArrayOop r = oopFactory::new_objArray(klass, count, CHECK_NULL);
-  objArrayHandle result (THREAD, r);
-
-  // Fill in results backwards
-  {
-    for (FieldStream st(k, local_fields_only, false); !st.eos(); st.next()) {
-      if (local_fields_only || st.access_flags().is_public()) {
-        oop field = new_field(&st, CHECK_NULL);
-        result->obj_at_put(--count, field);
-      }
-    }
-    assert(count == 0, "just checking");
-  }
-  return result();
-}
-
-
-oop Reflection::reflect_method(oop mirror, Symbol* method_name, objArrayHandle types, jint which, TRAPS) {
-  if (java_lang_Class::is_primitive(mirror))  return NULL;
-  klassOop klass = java_lang_Class::as_klassOop(mirror);
-  if (Klass::cast(klass)->oop_is_array() && which == MEMBER_DECLARED)  return NULL;
-
-  if (Klass::cast(java_lang_Class::as_klassOop(mirror))->oop_is_array()) {
-    klass = SystemDictionary::Object_klass();
-  }
-  instanceKlassHandle h_k(THREAD, klass);
-
-  // Ensure klass is linked (need not be initialized)
-  h_k->link_class(CHECK_NULL);
-
-  // For interfaces include static initializers under jdk1.2.x (since classic does that)
-  bool include_clinit = JDK_Version::is_jdk12x_version() && h_k->is_interface();
-
-  switch (which) {
-    case MEMBER_PUBLIC:
-      // First the public non-static methods (works if method holder is an interface)
-      // Note that we can ignore checks for overridden methods, since we go up the hierarchy.
-      {
-        for (MethodStream st(h_k, false, false); !st.eos(); st.next()) {
-          methodHandle m(THREAD, st.method());
-          // For interfaces include static initializers since classic does that!
-          if (method_name == m->name() && (include_clinit || (m->is_public() && !m->is_static() && !m->is_initializer()))) {
-            Symbol*  signature = m->signature();
-            bool parameter_match = match_parameter_types(m, types, ArgumentCount(signature).size(), CHECK_NULL);
-            if (parameter_match) {
-              return new_method(m, false, false, THREAD);
-            }
-          }
-        }
-      }
-      // Then the public static methods (works if method holder is an interface)
-      {
-        for (MethodStream st(h_k, false, false); !st.eos(); st.next()) {
-          methodHandle m(THREAD, st.method());
-          if (method_name == m->name() && m->is_public() && m->is_static() && !m->is_initializer()) {
-            Symbol*  signature = m->signature();
-            bool parameter_match = match_parameter_types(m, types, ArgumentCount(signature).size(), CHECK_NULL);
-            if (parameter_match) {
-              return new_method(m, false, false, THREAD);
-            }
-          }
-        }
-      }
-      break;
-    case MEMBER_DECLARED:
-      // All local methods
-      {
-        for (MethodStream st(h_k, true, true); !st.eos(); st.next()) {
-          methodHandle m(THREAD, st.method());
-          if (method_name == m->name() && !m->is_initializer()) {
-            Symbol*  signature = m->signature();
-            bool parameter_match = match_parameter_types(m, types, ArgumentCount(signature).size(), CHECK_NULL);
-            if (parameter_match) {
-              return new_method(m, false, false, THREAD);
-            }
-          }
-        }
-      }
-      break;
-    default:
-      break;
-  }
-  return NULL;
-}
-
-
-objArrayOop Reflection::reflect_methods(oop mirror, jint which, TRAPS) {
-  // Exclude primitive types
-  if (java_lang_Class::is_primitive(mirror) ||
-     (Klass::cast(java_lang_Class::as_klassOop(mirror))->oop_is_array() && (which == MEMBER_DECLARED))) {
-    klassOop klass = SystemDictionary::reflect_Method_klass();
-    return oopFactory::new_objArray(klass, 0, CHECK_NULL);  // Return empty array
-  }
-
-  klassOop klass = java_lang_Class::as_klassOop(mirror);
-  if (Klass::cast(java_lang_Class::as_klassOop(mirror))->oop_is_array()) {
-    klass = SystemDictionary::Object_klass();
-  }
-  instanceKlassHandle h_k(THREAD, klass);
-
-  // Ensure klass is linked (need not be initialized)
-  h_k->link_class(CHECK_NULL);
-
-  // We search the (super)interfaces only if h_k is an interface itself
-  bool is_interface = h_k->is_interface();
-
-  // For interfaces include static initializers under jdk1.2.x (since classic does that)
-  bool include_clinit = JDK_Version::is_jdk12x_version() && is_interface;
-
-  switch (which) {
-    case MEMBER_PUBLIC:
-      {
-
-        // Count public methods (non-static and static)
-        int count = 0;
-        {
-          for (MethodStream st(h_k, false, false); !st.eos(); st.next()) {
-            methodOop m = st.method();
-            // For interfaces include static initializers since classic does that!
-            if (include_clinit || (!m->is_initializer() && m->is_public() && !m->is_overridden_in(h_k()))) {
-              count++;
-            }
-          }
-        }
-
-        // Allocate result
-        klassOop klass = SystemDictionary::reflect_Method_klass();
-        objArrayOop r = oopFactory::new_objArray(klass, count, CHECK_NULL);
-        objArrayHandle h_result (THREAD, r);
-
-        // Fill in results backwards
-        {
-          // First the non-static public methods
-          for (MethodStream st(h_k, false, false); !st.eos(); st.next()) {
-            methodHandle m (THREAD, st.method());
-            if (!m->is_static() && !m->is_initializer() && m->is_public() && !m->is_overridden_in(h_k())) {
-              oop method = new_method(m, false, false, CHECK_NULL);
-              if (method == NULL) {
-                return NULL;
-              } else {
-                h_result->obj_at_put(--count, method);
-              }
-            }
-          }
-        }
-        {
-          // Then the static public methods
-          for (MethodStream st(h_k, false, !is_interface); !st.eos(); st.next()) {
-            methodHandle m (THREAD, st.method());
-            if (m->is_static() && (include_clinit || (!m->is_initializer()) && m->is_public() && !m->is_overridden_in(h_k()))) {
-              oop method = new_method(m, false, false, CHECK_NULL);
-              if (method == NULL) {
-                return NULL;
-              } else {
-                h_result->obj_at_put(--count, method);
-              }
-            }
-          }
-        }
-
-        assert(count == 0, "just checking");
-        return h_result();
-      }
-
-    case MEMBER_DECLARED:
-      {
-        // Count all methods
-        int count = 0;
-        {
-          for (MethodStream st(h_k, true, !is_interface); !st.eos(); st.next()) {
-            methodOop m = st.method();
-            if (!m->is_initializer()) {
-              count++;
-            }
-          }
-        }
-        // Allocate result
-        klassOop klass = SystemDictionary::reflect_Method_klass();
-        objArrayOop r = oopFactory::new_objArray(klass, count, CHECK_NULL);
-        objArrayHandle h_result (THREAD, r);
-
-        // Fill in results backwards
-        {
-          for (MethodStream st(h_k, true, true); !st.eos(); st.next()) {
-            methodHandle m (THREAD, st.method());
-            if (!m->is_initializer()) {
-              oop method = new_method(m, false, false, CHECK_NULL);
-              if (method == NULL) {
-                return NULL;
-              } else {
-                h_result->obj_at_put(--count, method);
-              }
-            }
-          }
-        }
-        assert(count == 0, "just checking");
-        return h_result();
-      }
-  }
-  ShouldNotReachHere();
-  return NULL;
-}
-
-
-oop Reflection::reflect_constructor(oop mirror, objArrayHandle types, jint which, TRAPS) {
-
-  // Exclude primitive, interface and array types
-  bool prim = java_lang_Class::is_primitive(mirror);
-  Klass* klass = prim ? NULL : Klass::cast(java_lang_Class::as_klassOop(mirror));
-  if (prim || klass->is_interface() || klass->oop_is_array()) return NULL;
-
-  // Must be instance klass
-  instanceKlassHandle h_k(THREAD, java_lang_Class::as_klassOop(mirror));
-
-  // Ensure klass is linked (need not be initialized)
-  h_k->link_class(CHECK_NULL);
-
-  bool local_only = (which == MEMBER_DECLARED);
-  for (MethodStream st(h_k, true, true); !st.eos(); st.next()) {
-    methodHandle m(THREAD, st.method());
-    if (m->name() == vmSymbols::object_initializer_name() && (local_only || m->is_public())) {
-      Symbol*  signature = m->signature();
-      bool parameter_match = match_parameter_types(m, types, ArgumentCount(signature).size(), CHECK_NULL);
-      if (parameter_match) {
-        return new_constructor(m, THREAD);
-      }
-    }
-  }
-
-  return NULL;
-}
-
-
-objArrayOop Reflection::reflect_constructors(oop mirror, jint which, TRAPS) {
-  // Exclude primitive, interface and array types
-  bool prim  = java_lang_Class::is_primitive(mirror);
-  Klass* k = prim ? NULL : Klass::cast(java_lang_Class::as_klassOop(mirror));
-  if (prim || k->is_interface() || k->oop_is_array()) {
-    return oopFactory::new_objArray(SystemDictionary::reflect_Constructor_klass(), 0, CHECK_NULL);  // Return empty array
-  }
-
-  // Must be instanceKlass at this point
-  instanceKlassHandle h_k(THREAD, java_lang_Class::as_klassOop(mirror));
-
-  // Ensure klass is linked (need not be initialized)
-  h_k->link_class(CHECK_NULL);
-
-  bool local_only = (which == MEMBER_DECLARED);
-  int count = 0;
-  {
-    for (MethodStream st(h_k, true, true); !st.eos(); st.next()) {
-      methodOop m = st.method();
-      if (m->name() == vmSymbols::object_initializer_name() && (local_only || m->is_public())) {
-        count++;
-      }
-    }
-  }
-
-  // Allocate result
-  Symbol* name = vmSymbols::java_lang_reflect_Constructor();
-  klassOop klass = SystemDictionary::resolve_or_fail(name, true, CHECK_NULL);
-  objArrayOop r = oopFactory::new_objArray(klass, count, CHECK_NULL);
-  objArrayHandle h_result (THREAD, r);
-
-  // Fill in results backwards
-  {
-    for (MethodStream st(h_k, true, true); !st.eos(); st.next()) {
-      methodHandle m (THREAD, st.method());
-      if (m->name() == vmSymbols::object_initializer_name() && (local_only || m->is_public())) {
-        oop constr = new_constructor(m, CHECK_NULL);
-        if (constr == NULL) {
-          return NULL;
-        } else {
-          h_result->obj_at_put(--count, constr);
-        }
-      }
-    }
-    assert(count == 0, "just checking");
-  }
-  return h_result();
-}
-
-
 // This would be nicer if, say, java.lang.reflect.Method was a subclass
 // of java.lang.reflect.Constructor
 
@@ -1647,6 +1124,3 @@
   invoke(klass, method, receiver, override, ptypes, T_VOID, args, false, CHECK_NULL);
   return receiver();
 }
-
-
-#endif /* SUPPORT_OLD_REFLECTION */
--- a/src/share/vm/runtime/reflection.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/reflection.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -27,7 +27,6 @@
 
 #include "oops/oop.hpp"
 #include "runtime/fieldDescriptor.hpp"
-#include "runtime/reflectionCompat.hpp"
 #include "utilities/accessFlags.hpp"
 #include "utilities/growableArray.hpp"
 
@@ -120,16 +119,6 @@
   // Create a java.lang.reflect.Field object based on a field descriptor
   static oop new_field(fieldDescriptor* fd, bool intern_name, TRAPS);
 
-  //---------------------------------------------------------------------------
-  //
-  // Support for old native code-based reflection (pre-JDK 1.4)
-  //
-  // NOTE: the method and constructor invocation code is still used
-  // for startup time reasons; see reflectionCompat.hpp.
-  //
-  //---------------------------------------------------------------------------
-
-#ifdef SUPPORT_OLD_REFLECTION
 private:
   // method resolution for invoke
   static methodHandle resolve_interface_call(instanceKlassHandle klass, methodHandle method, KlassHandle recv_klass, Handle receiver, TRAPS);
@@ -144,35 +133,11 @@
   // Conversion
   static BasicType basic_type_mirror_to_basic_type(oop basic_type_mirror, TRAPS);
 
-  static bool match_parameter_types(methodHandle method, objArrayHandle types, int parameter_count, TRAPS);
-  // Creating new java.lang.reflect.xxx wrappers
-  static oop new_field(FieldStream* st, TRAPS);
-
 public:
-  // Field lookup and verification.
-  static bool      resolve_field(Handle field_mirror, Handle& receiver, fieldDescriptor* fd, bool check_final, TRAPS);
-
-  // Reflective field access. Returns type code. Throws IllegalArgumentException.
-  static BasicType field_get(jvalue* value, fieldDescriptor* fd, Handle receiver);
-  static void      field_set(jvalue* value, fieldDescriptor* fd, Handle receiver, BasicType value_type, TRAPS);
-
-  // Reflective lookup of fields. Returns java.lang.reflect.Field instances.
-  static oop         reflect_field(oop mirror, Symbol* field_name, jint which, TRAPS);
-  static objArrayOop reflect_fields(oop mirror, jint which, TRAPS);
-
-  // Reflective lookup of methods. Returns java.lang.reflect.Method instances.
-  static oop         reflect_method(oop mirror, Symbol* method_name, objArrayHandle types, jint which, TRAPS);
-  static objArrayOop reflect_methods(oop mirror, jint which, TRAPS);
-
-  // Reflective lookup of constructors. Returns java.lang.reflect.Constructor instances.
-  static oop         reflect_constructor(oop mirror, objArrayHandle types, jint which, TRAPS);
-  static objArrayOop reflect_constructors(oop mirror, jint which, TRAPS);
-
   // Method invocation through java.lang.reflect.Method
   static oop      invoke_method(oop method_mirror, Handle receiver, objArrayHandle args, TRAPS);
   // Method invocation through java.lang.reflect.Constructor
   static oop      invoke_constructor(oop method_mirror, objArrayHandle args, TRAPS);
-#endif /* SUPPORT_OLD_REFLECTION */
 
 };
 
--- a/src/share/vm/runtime/reflectionCompat.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_RUNTIME_REFLECTIONCOMPAT_HPP
-#define SHARE_VM_RUNTIME_REFLECTIONCOMPAT_HPP
-
-// During the development of the JDK 1.4 reflection implementation
-// based on dynamic bytecode generation, it was hoped that the bulk of
-// the native code for reflection could be removed. Unfortunately
-// there is currently a significant cost associated with loading the
-// stub classes which impacts startup time. Until this cost can be
-// reduced, the JVM entry points JVM_InvokeMethod and
-// JVM_NewInstanceFromConstructor are still needed; these and their
-// dependents currently constitute the bulk of the native code for
-// reflection. If this cost is reduced in the future, the
-// NativeMethodAccessorImpl and NativeConstructorAccessorImpl classes
-// can be removed from sun.reflect and all of the code guarded by this
-// flag removed from the product build. (Non-product builds,
-// specifically the "optimized" target, would retain the code so they
-// could be dropped into earlier JDKs for comparative benchmarking.)
-
-//#ifndef PRODUCT
-# define SUPPORT_OLD_REFLECTION
-//#endif
-
-#endif // SHARE_VM_RUNTIME_REFLECTIONCOMPAT_HPP
--- a/src/share/vm/runtime/reflectionUtils.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/reflectionUtils.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -107,10 +107,8 @@
 
 class FieldStream : public KlassStream {
  private:
-  int length() const                { return fields()->length(); }
-  constantPoolOop constants() const { return _klass->constants(); }
- protected:
-  typeArrayOop fields() const       { return _klass->fields(); }
+  int length() const                { return _klass->java_fields_count(); }
+
  public:
   FieldStream(instanceKlassHandle klass, bool local_only, bool classes_only)
     : KlassStream(klass, local_only, classes_only) {
@@ -118,26 +116,23 @@
     next();
   }
 
-  void next() { _index -= instanceKlass::next_offset; }
+  void next() { _index -= 1; }
 
   // Accessors for current field
   AccessFlags access_flags() const {
     AccessFlags flags;
-    flags.set_flags(fields()->ushort_at(index() + instanceKlass::access_flags_offset));
+    flags.set_flags(_klass->field_access_flags(_index));
     return flags;
   }
   Symbol* name() const {
-    int name_index = fields()->ushort_at(index() + instanceKlass::name_index_offset);
-    return constants()->symbol_at(name_index);
+    return _klass->field_name(_index);
   }
   Symbol* signature() const {
-    int signature_index = fields()->ushort_at(index() +
-                                       instanceKlass::signature_index_offset);
-    return constants()->symbol_at(signature_index);
+    return _klass->field_signature(_index);
   }
   // missing: initval()
   int offset() const {
-    return _klass->offset_from_fields( index() );
+    return _klass->field_offset( index() );
   }
 };
 
@@ -213,10 +208,10 @@
   }
   int field_count();
   void next() {
-    _index -= instanceKlass::next_offset;
+    _index -= 1;
     if (has_filtered_field()) {
       while (_index >=0 && FilteredFieldsMap::is_filtered_field((klassOop)_klass(), offset())) {
-        _index -= instanceKlass::next_offset;
+        _index -= 1;
       }
     }
   }
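Illustrative only: with the per-field accessors above, a FieldStream walk no longer depends on the raw fields-array layout. Here klass is assumed to be an instanceKlassHandle:

    for (FieldStream fs(klass, true /*local_only*/, false /*classes_only*/); !fs.eos(); fs.next()) {
      tty->print_cr("%s %s", fs.signature()->as_C_string(), fs.name()->as_C_string());
    }
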
--- a/src/share/vm/runtime/safepoint.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/safepoint.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -511,6 +511,11 @@
 
   TraceTime t4("sweeping nmethods", TraceSafepointCleanupTime);
   NMethodSweeper::scan_stacks();
+
+  // rotate log files?
+  if (UseGCLogFileRotation) {
+    gclog_or_tty->rotate_log();
+  }
 }
 
 
--- a/src/share/vm/runtime/sharedRuntime.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -2130,9 +2130,9 @@
  public:
   AdapterFingerPrint(int total_args_passed, BasicType* sig_bt) {
     // The fingerprint is based on the BasicType signature encoded
-    // into an array of ints with four entries per int.
+    // into an array of ints with eight entries per int.
     int* ptr;
-    int len = (total_args_passed + 3) >> 2;
+    int len = (total_args_passed + 7) >> 3;
     if (len <= (int)(sizeof(_value._compact) / sizeof(int))) {
       _value._compact[0] = _value._compact[1] = _value._compact[2] = 0;
       // Storing the signature encoded as signed chars hits about 98%
@@ -2145,11 +2145,11 @@
       ptr = _value._fingerprint;
     }
 
-    // Now pack the BasicTypes with 4 per int
+    // Now pack the BasicTypes with 8 per int
     int sig_index = 0;
     for (int index = 0; index < len; index++) {
       int value = 0;
-      for (int byte = 0; byte < 4; byte++) {
+      for (int byte = 0; byte < 8; byte++) {
         if (sig_index < total_args_passed) {
           value = (value << 4) | adapter_encoding(sig_bt[sig_index++]);
         }
@@ -2190,8 +2190,9 @@
 
   const char* as_string() {
     stringStream st;
+    st.print("0x");
     for (int i = 0; i < length(); i++) {
-      st.print(PTR_FORMAT, value(i));
+      st.print("%08x", value(i));
     }
     return st.as_string();
   }
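A worked example of the tighter packing above (illustrative): each BasicType still encodes into a 4-bit nibble, so a 32-bit int now holds 8 entries instead of 4.

    int total_args_passed = 10;               // hypothetical signature length
    int len = (total_args_passed + 7) >> 3;   // 2 ints; unused nibbles remain zero
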
--- a/src/share/vm/runtime/sharedRuntime.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/sharedRuntime.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -45,6 +45,8 @@
 // information, etc.
 
 class SharedRuntime: AllStatic {
+  friend class VMStructs;
+
  private:
   static methodHandle resolve_sub_helper(JavaThread *thread,
                                      bool is_virtual,
--- a/src/share/vm/runtime/simpleThresholdPolicy.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/simpleThresholdPolicy.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -50,15 +50,18 @@
   case COMPILE:
     tty->print("compile");
     break;
-  case KILL:
-    tty->print("kill");
+  case REMOVE_FROM_QUEUE:
+    tty->print("remove-from-queue");
     break;
-  case UPDATE:
-    tty->print("update");
+  case UPDATE_IN_QUEUE:
+    tty->print("update-in-queue");
     break;
   case REPROFILE:
     tty->print("reprofile");
     break;
+  case MAKE_NOT_ENTRANT:
+    tty->print("make-not-entrant");
+    break;
   default:
     tty->print("unknown");
   }
@@ -68,7 +71,6 @@
   ResourceMark rm;
   char *method_name = mh->name_and_sig_as_C_string();
   tty->print("[%s", method_name);
-  // We can have an inlinee, although currently we don't generate any notifications for the inlined methods.
   if (inlinee_event) {
     char *inlinee_name = imh->name_and_sig_as_C_string();
     tty->print(" [%s]] ", inlinee_name);
@@ -170,7 +172,7 @@
 }
 
 nmethod* SimpleThresholdPolicy::event(methodHandle method, methodHandle inlinee,
-                                      int branch_bci, int bci, CompLevel comp_level, TRAPS) {
+                                      int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS) {
   if (comp_level == CompLevel_none &&
       JvmtiExport::can_post_interpreter_events()) {
     assert(THREAD->is_Java_thread(), "Should be java thread");
@@ -190,12 +192,13 @@
   }
 
   if (bci == InvocationEntryBci) {
-    method_invocation_event(method, inlinee, comp_level, THREAD);
+    method_invocation_event(method, inlinee, comp_level, nm, THREAD);
   } else {
-    method_back_branch_event(method, inlinee, bci, comp_level, THREAD);
-    int highest_level = method->highest_osr_comp_level();
+    method_back_branch_event(method, inlinee, bci, comp_level, nm, THREAD);
+    // method == inlinee if the event originated in the main method
+    int highest_level = inlinee->highest_osr_comp_level();
     if (highest_level > comp_level) {
-      osr_nm = method->lookup_osr_nmethod_for(bci, highest_level, false);
+      osr_nm = inlinee->lookup_osr_nmethod_for(bci, highest_level, false);
     }
   }
   return osr_nm;
@@ -203,11 +206,7 @@
 
 // Check if the method can be compiled, change level if necessary
 void SimpleThresholdPolicy::compile(methodHandle mh, int bci, CompLevel level, TRAPS) {
-  // Take the given ceiling into the account.
-  // NOTE: You can set it to 1 to get a pure C1 version.
-  if ((CompLevel)TieredStopAtLevel < level) {
-    level = (CompLevel)TieredStopAtLevel;
-  }
+  assert(level <= TieredStopAtLevel, "Invalid compilation level");
   if (level == CompLevel_none) {
     return;
   }
@@ -224,10 +223,10 @@
   if (bci != InvocationEntryBci && mh->is_not_osr_compilable()) {
     return;
   }
-  if (PrintTieredEvents) {
-    print_event(COMPILE, mh, mh, bci, level);
-  }
   if (!CompileBroker::compilation_is_in_queue(mh, bci)) {
+    if (PrintTieredEvents) {
+      print_event(COMPILE, mh, mh, bci, level);
+    }
     submit_compile(mh, bci, level, THREAD);
   }
 }
@@ -285,45 +284,48 @@
 
 // Common transition function. Given a predicate determines if a method should transition to another level.
 CompLevel SimpleThresholdPolicy::common(Predicate p, methodOop method, CompLevel cur_level) {
-  if (is_trivial(method)) return CompLevel_simple;
-
   CompLevel next_level = cur_level;
   int i = method->invocation_count();
   int b = method->backedge_count();
 
-  switch(cur_level) {
-  case CompLevel_none:
-    // If we were at full profile level, would we switch to full opt?
-    if (common(p, method, CompLevel_full_profile) == CompLevel_full_optimization) {
-      next_level = CompLevel_full_optimization;
-    } else if ((this->*p)(i, b, cur_level)) {
-      next_level = CompLevel_full_profile;
-    }
-    break;
-  case CompLevel_limited_profile:
-  case CompLevel_full_profile:
-    {
-      methodDataOop mdo = method->method_data();
-      if (mdo != NULL) {
-        if (mdo->would_profile()) {
-          int mdo_i = mdo->invocation_count_delta();
-          int mdo_b = mdo->backedge_count_delta();
-          if ((this->*p)(mdo_i, mdo_b, cur_level)) {
+  if (is_trivial(method)) {
+    next_level = CompLevel_simple;
+  } else {
+    switch(cur_level) {
+    case CompLevel_none:
+      // If we were at full profile level, would we switch to full opt?
+      if (common(p, method, CompLevel_full_profile) == CompLevel_full_optimization) {
+        next_level = CompLevel_full_optimization;
+      } else if ((this->*p)(i, b, cur_level)) {
+        next_level = CompLevel_full_profile;
+      }
+      break;
+    case CompLevel_limited_profile:
+    case CompLevel_full_profile:
+      {
+        methodDataOop mdo = method->method_data();
+        if (mdo != NULL) {
+          if (mdo->would_profile()) {
+            int mdo_i = mdo->invocation_count_delta();
+            int mdo_b = mdo->backedge_count_delta();
+            if ((this->*p)(mdo_i, mdo_b, cur_level)) {
+              next_level = CompLevel_full_optimization;
+            }
+          } else {
             next_level = CompLevel_full_optimization;
           }
-        } else {
-          next_level = CompLevel_full_optimization;
         }
       }
+      break;
     }
-    break;
   }
-  return next_level;
+  return MIN2(next_level, (CompLevel)TieredStopAtLevel);
 }
 
 // Determine if a method should be compiled with a normal entry point at a different level.
 CompLevel SimpleThresholdPolicy::call_event(methodOop method,  CompLevel cur_level) {
-  CompLevel osr_level = (CompLevel) method->highest_osr_comp_level();
+  CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(),
+                             common(&SimpleThresholdPolicy::loop_predicate, method, cur_level));
   CompLevel next_level = common(&SimpleThresholdPolicy::call_predicate, method, cur_level);
 
   // If OSR method level is greater than the regular method level, the levels should be
@@ -344,21 +346,22 @@
 
 // Determine if we should do an OSR compilation of a given method.
 CompLevel SimpleThresholdPolicy::loop_event(methodOop method, CompLevel cur_level) {
+  CompLevel next_level = common(&SimpleThresholdPolicy::loop_predicate, method, cur_level);
   if (cur_level == CompLevel_none) {
     // If there is a live OSR method that means that we deopted to the interpreter
     // for the transition.
-    CompLevel osr_level = (CompLevel)method->highest_osr_comp_level();
+    CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
     if (osr_level > CompLevel_none) {
       return osr_level;
     }
   }
-  return common(&SimpleThresholdPolicy::loop_predicate, method, cur_level);
+  return next_level;
 }
 
 
 // Handle the invocation event.
 void SimpleThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh,
-                                              CompLevel level, TRAPS) {
+                                              CompLevel level, nmethod* nm, TRAPS) {
   if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
     CompLevel next_level = call_event(mh(), level);
     if (next_level != level) {
@@ -370,7 +373,7 @@
 // Handle the back branch event. Notice that we can compile the method
 // with a regular entry from here.
 void SimpleThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh,
-                                               int bci, CompLevel level, TRAPS) {
+                                                     int bci, CompLevel level, nmethod* nm, TRAPS) {
   // If the method is already compiling, quickly bail out.
   if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, bci)) {
     // Use loop event as an opportunity to also check there's been
--- a/src/share/vm/runtime/simpleThresholdPolicy.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/simpleThresholdPolicy.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -62,7 +62,7 @@
   void set_c1_count(int x) { _c1_count = x;    }
   void set_c2_count(int x) { _c2_count = x;    }
 
-  enum EventType { CALL, LOOP, COMPILE, KILL, UPDATE, REPROFILE };
+  enum EventType { CALL, LOOP, COMPILE, REMOVE_FROM_QUEUE, UPDATE_IN_QUEUE, REPROFILE, MAKE_NOT_ENTRANT };
   void print_event(EventType type, methodHandle mh, methodHandle imh, int bci, CompLevel level);
   // Print policy-specific information if necessary
   virtual void print_specific(EventType type, methodHandle mh, methodHandle imh, int bci, CompLevel level) { }
@@ -88,9 +88,9 @@
     return CompLevel_none;
   }
   virtual void method_invocation_event(methodHandle method, methodHandle inlinee,
-                                       CompLevel level, TRAPS);
+                                       CompLevel level, nmethod* nm, TRAPS);
   virtual void method_back_branch_event(methodHandle method, methodHandle inlinee,
-                                        int bci, CompLevel level, TRAPS);
+                                        int bci, CompLevel level, nmethod* nm, TRAPS);
 public:
   SimpleThresholdPolicy() : _c1_count(0), _c2_count(0) { }
   virtual int compiler_count(CompLevel comp_level) {
@@ -98,20 +98,24 @@
     if (is_c2_compile(comp_level)) return c2_count();
     return 0;
   }
+  virtual CompLevel initial_compile_level() { return MIN2((CompLevel)TieredStopAtLevel, CompLevel_initial_compile); }
   virtual void do_safepoint_work() { }
   virtual void delay_compilation(methodOop method) { }
   virtual void disable_compilation(methodOop method) { }
-  // TODO: we should honour reprofiling requests in the future. Currently reprofiling
-  // would happen but not to the extent we would ideally like.
   virtual void reprofile(ScopeDesc* trap_scope, bool is_osr);
   virtual nmethod* event(methodHandle method, methodHandle inlinee,
-                         int branch_bci, int bci, CompLevel comp_level, TRAPS);
+                         int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS);
   // Select task is called by CompileBroker. We should return a task or NULL.
   virtual CompileTask* select_task(CompileQueue* compile_queue);
   // Tell the runtime if we think a given method is adequately profiled.
   virtual bool is_mature(methodOop method);
   // Initialize: set compiler thread count
   virtual void initialize();
+  virtual bool should_not_inline(ciEnv* env, ciMethod* callee) {
+    return (env->comp_level() == CompLevel_limited_profile ||
+            env->comp_level() == CompLevel_full_profile) &&
+            callee->has_loops();
+  }
 };
 
 #endif // SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_HPP
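Illustrative effect of the new clamping, assuming the usual CompLevel numbering (CompLevel_simple == 1, CompLevel_full_optimization == 4): with -XX:TieredStopAtLevel=1 a transition that would have requested full optimization is now capped.

    CompLevel capped = MIN2(CompLevel_full_optimization, (CompLevel)TieredStopAtLevel);
    // capped == CompLevel_simple when TieredStopAtLevel == 1
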
--- a/src/share/vm/runtime/stubRoutines.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/stubRoutines.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -51,8 +51,6 @@
 address StubRoutines::_forward_exception_entry                  = NULL;
 address StubRoutines::_throw_AbstractMethodError_entry          = NULL;
 address StubRoutines::_throw_IncompatibleClassChangeError_entry = NULL;
-address StubRoutines::_throw_ArithmeticException_entry          = NULL;
-address StubRoutines::_throw_NullPointerException_entry         = NULL;
 address StubRoutines::_throw_NullPointerException_at_call_entry = NULL;
 address StubRoutines::_throw_StackOverflowError_entry           = NULL;
 address StubRoutines::_throw_WrongMethodTypeException_entry     = NULL;
@@ -108,6 +106,7 @@
 address StubRoutines::_arrayof_oop_disjoint_arraycopy    = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_oop_copy);
 address StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit  = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_oop_copy_uninit);
 
+address StubRoutines::_zero_aligned_words = CAST_FROM_FN_PTR(address, Copy::zero_to_words);
 
 address StubRoutines::_checkcast_arraycopy               = NULL;
 address StubRoutines::_checkcast_arraycopy_uninit        = NULL;
--- a/src/share/vm/runtime/stubRoutines.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/stubRoutines.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -128,8 +128,6 @@
   static address _catch_exception_entry;
   static address _throw_AbstractMethodError_entry;
   static address _throw_IncompatibleClassChangeError_entry;
-  static address _throw_ArithmeticException_entry;
-  static address _throw_NullPointerException_entry;
   static address _throw_NullPointerException_at_call_entry;
   static address _throw_StackOverflowError_entry;
   static address _throw_WrongMethodTypeException_entry;
@@ -199,6 +197,9 @@
   static address _arrayof_jshort_fill;
   static address _arrayof_jint_fill;
 
+  // zero heap space aligned to jlong (8 bytes)
+  static address _zero_aligned_words;
+
   // These are versions of the java.lang.Math methods which perform
   // the same operations as the intrinsic version.  They are used for
   // constant folding in the compiler to ensure equivalence.  If the
@@ -251,8 +252,6 @@
   // Implicit exceptions
   static address throw_AbstractMethodError_entry()         { return _throw_AbstractMethodError_entry; }
   static address throw_IncompatibleClassChangeError_entry(){ return _throw_IncompatibleClassChangeError_entry; }
-  static address throw_ArithmeticException_entry()         { return _throw_ArithmeticException_entry; }
-  static address throw_NullPointerException_entry()        { return _throw_NullPointerException_entry; }
   static address throw_NullPointerException_at_call_entry(){ return _throw_NullPointerException_at_call_entry; }
   static address throw_StackOverflowError_entry()          { return _throw_StackOverflowError_entry; }
   static address throw_WrongMethodTypeException_entry()    { return _throw_WrongMethodTypeException_entry; }
@@ -332,6 +331,7 @@
 
   static address select_fill_function(BasicType t, bool aligned, const char* &name);
 
+  static address zero_aligned_words()   { return _zero_aligned_words; }
 
   static double  intrinsic_log(double d) {
     assert(_intrinsic_log != NULL, "must be defined");
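A hedged sketch of how the new zero_aligned_words entry point might be invoked; the default binding is Copy::zero_to_words, whose (HeapWord*, size_t) signature is assumed below, and 'to'/'count' are placeholders:

    typedef void (*zero_words_fn)(HeapWord* to, size_t count);
    ((zero_words_fn)StubRoutines::zero_aligned_words())(to, count);   // 'to' must be 8-byte aligned
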
--- a/src/share/vm/runtime/thread.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/thread.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -749,8 +749,9 @@
   jint thread_parity = _oops_do_parity;
   if (thread_parity != strong_roots_parity) {
     jint res = Atomic::cmpxchg(strong_roots_parity, &_oops_do_parity, thread_parity);
-    if (res == thread_parity) return true;
-    else {
+    if (res == thread_parity) {
+      return true;
+    } else {
       guarantee(res == strong_roots_parity, "Or else what?");
       assert(SharedHeap::heap()->n_par_threads() > 0,
              "Should only fail when parallel.");
@@ -966,7 +967,7 @@
 // General purpose hook into Java code, run once when the VM is initialized.
 // The Java library method itself may be changed independently from the VM.
 static void call_postVMInitHook(TRAPS) {
-  klassOop k = SystemDictionary::sun_misc_PostVMInitHook_klass();
+  klassOop k = SystemDictionary::PostVMInitHook_klass();
   instanceKlassHandle klass (THREAD, k);
   if (klass.not_null()) {
     JavaValue result(T_VOID);
@@ -1272,7 +1273,6 @@
   _exception_oop = NULL;
   _exception_pc  = 0;
   _exception_handler_pc = 0;
-  _exception_stack_size = 0;
   _is_method_handle_return = 0;
   _jvmti_thread_state= NULL;
   _should_post_on_exceptions_flag = JNI_FALSE;
@@ -2860,6 +2860,44 @@
   }
 }
 
+class PrintAndVerifyOopClosure: public OopClosure {
+ protected:
+  template <class T> inline void do_oop_work(T* p) {
+    oop obj = oopDesc::load_decode_heap_oop(p);
+    if (obj == NULL) return;
+    tty->print(INTPTR_FORMAT ": ", p);
+    if (obj->is_oop_or_null()) {
+      if (obj->is_objArray()) {
+        tty->print_cr("valid objArray: " INTPTR_FORMAT, (oopDesc*) obj);
+      } else {
+        obj->print();
+      }
+    } else {
+      tty->print_cr("invalid oop: " INTPTR_FORMAT, (oopDesc*) obj);
+    }
+    tty->cr();
+  }
+ public:
+  virtual void do_oop(oop* p) { do_oop_work(p); }
+  virtual void do_oop(narrowOop* p)  { do_oop_work(p); }
+};
+
+
+static void oops_print(frame* f, const RegisterMap *map) {
+  PrintAndVerifyOopClosure print;
+  f->print_value();
+  f->oops_do(&print, NULL, (RegisterMap*)map);
+}
+
+// Print out all the locations that contain oops and whether they are
+// valid or not.  This is useful when trying to find the oldest frame
+// where an oop has gone bad since the frame walk is from youngest to
+// oldest.
+void JavaThread::trace_oops() {
+  tty->print_cr("[Trace oops]");
+  frames_do(oops_print);
+}
+
 
 #ifdef ASSERT
 // Print or validate the layout of stack frames
@@ -3347,7 +3385,9 @@
   // Notify JVMTI agents that VM initialization is complete - nop if no agents.
   JvmtiExport::post_vm_initialized();
 
-  Chunk::start_chunk_pool_cleaner_task();
+  if (CleanChunkPoolAsync) {
+    Chunk::start_chunk_pool_cleaner_task();
+  }
 
   // initialize compiler(s)
   CompileBroker::compilation_init();
@@ -3698,6 +3738,14 @@
     // heap is unparseable if they are caught. Grab the Heap_lock
     // to prevent this. The GC vm_operations will not be able to
     // queue until after the vm thread is dead.
+    // After this point we will never leave the safepoint before the VM
+    // exits, so concurrent GC threads do not need to be explicitly
+    // stopped; they remain inactive until the process exits.
+    // Note: some concurrent G1 threads may still be running during this
+    // terminal safepoint, but they will not be accessing the heap, only
+    // G1-specific side data structures that no other thread accesses
+    // after this point.
+
     MutexLocker ml(Heap_lock);
 
     VMThread::wait_for_vm_thread_exit();
@@ -3858,8 +3906,9 @@
     }
   }
   VMThread* vmt = VMThread::vm_thread();
-  if (vmt->claim_oops_do(is_par, cp))
+  if (vmt->claim_oops_do(is_par, cp)) {
     vmt->oops_do(f, cf);
+  }
 }
 
 #ifndef SERIALGC
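The new trace_oops() helper is compiled out of product builds (PRODUCT_RETURN in the thread.hpp hunk below); a hedged usage sketch, with 'thread' as a placeholder JavaThread*:

    thread->trace_oops();   // walks frames youngest-to-oldest, printing each location that holds an oop
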
--- a/src/share/vm/runtime/thread.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/thread.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -841,7 +841,6 @@
   volatile oop     _exception_oop;               // Exception thrown in compiled code
   volatile address _exception_pc;                // PC where exception happened
   volatile address _exception_handler_pc;        // PC for handler of exception
-  volatile int     _exception_stack_size;        // Size of frame where exception happened
   volatile int     _is_method_handle_return;     // true (== 1) if the current exception PC is a MethodHandle call site.
 
   // support for compilation
@@ -1182,7 +1181,6 @@
 
   // Exception handling for compiled methods
   oop      exception_oop() const                 { return _exception_oop; }
-  int      exception_stack_size() const          { return _exception_stack_size; }
   address  exception_pc() const                  { return _exception_pc; }
   address  exception_handler_pc() const          { return _exception_handler_pc; }
   bool     is_method_handle_return() const       { return _is_method_handle_return == 1; }
@@ -1190,7 +1188,6 @@
   void set_exception_oop(oop o)                  { _exception_oop = o; }
   void set_exception_pc(address a)               { _exception_pc = a; }
   void set_exception_handler_pc(address a)       { _exception_handler_pc = a; }
-  void set_exception_stack_size(int size)        { _exception_stack_size = size; }
   void set_is_method_handle_return(bool value)   { _is_method_handle_return = value ? 1 : 0; }
 
   // Stack overflow support
@@ -1264,7 +1261,6 @@
   static ByteSize exception_oop_offset()         { return byte_offset_of(JavaThread, _exception_oop       ); }
   static ByteSize exception_pc_offset()          { return byte_offset_of(JavaThread, _exception_pc        ); }
   static ByteSize exception_handler_pc_offset()  { return byte_offset_of(JavaThread, _exception_handler_pc); }
-  static ByteSize exception_stack_size_offset()  { return byte_offset_of(JavaThread, _exception_stack_size); }
   static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); }
   static ByteSize stack_guard_state_offset()     { return byte_offset_of(JavaThread, _stack_guard_state   ); }
   static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags       ); }
@@ -1379,6 +1375,7 @@
   void trace_stack()                             PRODUCT_RETURN;
   void trace_stack_from(vframe* start_vf)        PRODUCT_RETURN;
   void trace_frames()                            PRODUCT_RETURN;
+  void trace_oops()                              PRODUCT_RETURN;
 
   // Print an annotated view of the stack frames
   void print_frame_layout(int depth = 0, bool validate_only = false) NOT_DEBUG_RETURN;
--- a/src/share/vm/runtime/vframeArray.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/vframeArray.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,6 +47,8 @@
 // represent an interpreter frame which will eventually be created.
 
 class vframeArrayElement : public _ValueObj {
+  friend class VMStructs;
+
   private:
 
     frame _frame;                                                // the interpreter frame we will unpack into
@@ -107,6 +109,8 @@
 // at the data in each vframeElement
 
 class vframeArray: public CHeapObj {
+  friend class VMStructs;
+
  private:
 
 
--- a/src/share/vm/runtime/virtualspace.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/virtualspace.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -68,7 +68,7 @@
   assert(len >= required_size, "len too small");
 
   const size_t s = size_t(addr);
-  const size_t beg_ofs = s + prefix_size & suffix_align - 1;
+  const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1);
   const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;
 
   if (len < beg_delta + required_size) {
@@ -113,8 +113,8 @@
     assert(res >= raw, "alignment decreased start addr");
     assert(res + prefix_size + suffix_size <= raw + reserve_size,
            "alignment increased end addr");
-    assert((res & prefix_align - 1) == 0, "bad alignment of prefix");
-    assert((res + prefix_size & suffix_align - 1) == 0,
+    assert((res & (prefix_align - 1)) == 0, "bad alignment of prefix");
+    assert(((res + prefix_size) & (suffix_align - 1)) == 0,
            "bad alignment of suffix");
   }
 #endif
@@ -135,7 +135,7 @@
     assert(UseCompressedOops, "currently requested address used only for compressed oops");
     if (PrintCompressedOopsMode) {
       tty->cr();
-      tty->print_cr("Reserved memory at not requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
+      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
     }
     // OS ignored requested address. Try different address.
     if (special) {
@@ -162,11 +162,11 @@
   assert(prefix_align != 0, "sanity");
   assert(suffix_size != 0, "sanity");
   assert(suffix_align != 0, "sanity");
-  assert((prefix_size & prefix_align - 1) == 0,
+  assert((prefix_size & (prefix_align - 1)) == 0,
     "prefix_size not divisible by prefix_align");
-  assert((suffix_size & suffix_align - 1) == 0,
+  assert((suffix_size & (suffix_align - 1)) == 0,
     "suffix_size not divisible by suffix_align");
-  assert((suffix_align & prefix_align - 1) == 0,
+  assert((suffix_align & (prefix_align - 1)) == 0,
     "suffix_align not divisible by prefix_align");
 
   // Assert that if noaccess_prefix is used, it is the same as prefix_align.
@@ -210,8 +210,8 @@
   if (addr == NULL) return;
 
   // Check whether the result has the needed alignment (unlikely unless
-  // prefix_align == suffix_align).
-  const size_t ofs = size_t(addr) + adjusted_prefix_size & suffix_align - 1;
+  // prefix_align < suffix_align).
+  const size_t ofs = (size_t(addr) + adjusted_prefix_size) & (suffix_align - 1);
   if (ofs != 0) {
     // Wrong alignment.  Release, allocate more space and do manual alignment.
     //
@@ -232,6 +232,15 @@
       addr = reserve_and_align(size + suffix_align, adjusted_prefix_size,
                                prefix_align, suffix_size, suffix_align);
     }
+
+    if (requested_address != 0 &&
+        failed_to_reserve_as_requested(addr, requested_address, size, false)) {
+      // As a result of the alignment constraints, the allocated addr differs
+      // from the requested address. Return back to the caller who can
+      // take remedial action (like try again without a requested address).
+      assert(_base == NULL, "should be");
+      return;
+    }
   }
 
   _base = addr;
@@ -245,13 +254,19 @@
                                const size_t noaccess_prefix,
                                bool executable) {
   const size_t granularity = os::vm_allocation_granularity();
-  assert((size & granularity - 1) == 0,
+  assert((size & (granularity - 1)) == 0,
          "size not aligned to os::vm_allocation_granularity()");
-  assert((alignment & granularity - 1) == 0,
+  assert((alignment & (granularity - 1)) == 0,
          "alignment not aligned to os::vm_allocation_granularity()");
   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
          "not a power of 2");
 
+  alignment = MAX2(alignment, (size_t)os::vm_page_size());
+
+  // Assert that if noaccess_prefix is used, it is the same as alignment.
+  assert(noaccess_prefix == 0 ||
+         noaccess_prefix == alignment, "noaccess prefix wrong");
+
   _base = NULL;
   _size = 0;
   _special = false;
@@ -282,10 +297,8 @@
         return;
       }
       // Check alignment constraints
-      if (alignment > 0) {
-        assert((uintptr_t) base % alignment == 0,
-               "Large pages returned a non-aligned address");
-      }
+      assert((uintptr_t) base % alignment == 0,
+             "Large pages returned a non-aligned address");
       _special = true;
     } else {
       // failed; try to reserve regular memory below
@@ -321,7 +334,7 @@
     if (base == NULL) return;
 
     // Check alignment constraints
-    if (alignment > 0 && ((size_t)base & alignment - 1) != 0) {
+    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
       // Base not aligned, retry
       if (!os::release_memory(base, size)) fatal("os::release_memory failed");
       // Reserve size large enough to do manual alignment and
@@ -338,12 +351,21 @@
         os::release_memory(extra_base, extra_size);
         base = os::reserve_memory(size, base);
       } while (base == NULL);
+
+      if (requested_address != 0 &&
+          failed_to_reserve_as_requested(base, requested_address, size, false)) {
+        // As a result of the alignment constraints, the allocated base differs
+        // from the requested address. Return to the caller, who can take
+        // remedial action (such as retrying without a requested address).
+        assert(_base == NULL, "should be");
+        return;
+      }
     }
   }
   // Done
   _base = base;
   _size = size;
-  _alignment = MAX2(alignment, (size_t) os::vm_page_size());
+  _alignment = alignment;
   _noaccess_prefix = noaccess_prefix;
 
   // Assert that if noaccess_prefix is used, it is the same as alignment.
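
With compressed oops the reserved block starts with a protected no-access prefix, and it is the usable base just past that prefix, base + noaccess_prefix, that has to land on an alignment boundary; the reworked check in the hunk above therefore tests that sum rather than the raw base. A small sketch of that test, with purely illustrative names and values:

#include <cassert>
#include <cstddef>
#include <cstdint>

// True when the usable region that begins 'noaccess_prefix' bytes into a
// reservation at 'base' falls on an 'alignment' boundary (alignment is assumed
// to be a power of two).
static bool usable_base_aligned(uintptr_t base, size_t noaccess_prefix, size_t alignment) {
  return ((base + noaccess_prefix) & (alignment - 1)) == 0;
}

int main() {
  // 0x7fffc000 itself is not 0x8000-aligned, but the usable base
  // 0x7fffc000 + 0x4000 = 0x80000000 is, so such a reservation would be kept.
  assert(usable_base_aligned(0x7fffc000, 0x4000, 0x8000));
  assert(!usable_base_aligned(0x7fffc000, 0, 0x8000));
  return 0;
}
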
--- a/src/share/vm/runtime/vmStructs.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/vmStructs.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -29,6 +29,11 @@
 #include "classfile/placeholders.hpp"
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
+#include "ci/ciField.hpp"
+#include "ci/ciInstance.hpp"
+#include "ci/ciObjArrayKlass.hpp"
+#include "ci/ciMethodData.hpp"
+#include "ci/ciSymbol.hpp"
 #include "code/codeBlob.hpp"
 #include "code/codeCache.hpp"
 #include "code/compressedStream.hpp"
@@ -38,6 +43,7 @@
 #include "code/stubs.hpp"
 #include "code/vmreg.hpp"
 #include "compiler/oopMap.hpp"
+#include "compiler/compileBroker.hpp"
 #include "gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp"
 #include "gc_implementation/shared/immutableSpace.hpp"
 #include "gc_implementation/shared/markSweep.hpp"
@@ -90,6 +96,8 @@
 #include "oops/typeArrayOop.hpp"
 #include "prims/jvmtiAgentThread.hpp"
 #include "runtime/arguments.hpp"
+#include "runtime/deoptimization.hpp"
+#include "runtime/vframeArray.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/java.hpp"
 #include "runtime/javaCalls.hpp"
@@ -165,9 +173,26 @@
 #include "gc_implementation/parallelScavenge/psVirtualspace.hpp"
 #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
 #include "gc_implementation/parallelScavenge/vmStructs_parallelgc.hpp"
+#include "gc_implementation/g1/vmStructs_g1.hpp"
 #endif
 #ifdef COMPILER2
+#include "opto/addnode.hpp"
+#include "opto/block.hpp"
+#include "opto/callnode.hpp"
+#include "opto/cfgnode.hpp"
+#include "opto/chaitin.hpp"
+#include "opto/divnode.hpp"
+#include "opto/locknode.hpp"
+#include "opto/loopnode.hpp"
+#include "opto/machnode.hpp"
 #include "opto/matcher.hpp"
+#include "opto/mulnode.hpp"
+#include "opto/phaseX.hpp"
+#include "opto/parse.hpp"
+#include "opto/regalloc.hpp"
+#include "opto/rootnode.hpp"
+#include "opto/subnode.hpp"
+#include "opto/vectornode.hpp"
 #ifdef TARGET_ARCH_MODEL_x86_32
 # include "adfiles/adGlobals_x86_32.hpp"
 #endif
@@ -261,6 +286,7 @@
   nonstatic_field(instanceKlass,               _nof_implementors,                             int)                                   \
   nonstatic_field(instanceKlass,               _implementors[0],                              klassOop)                              \
   nonstatic_field(instanceKlass,               _fields,                                       typeArrayOop)                          \
+  nonstatic_field(instanceKlass,               _java_fields_count,                             int)                                   \
   nonstatic_field(instanceKlass,               _constants,                                    constantPoolOop)                       \
   nonstatic_field(instanceKlass,               _class_loader,                                 oop)                                   \
   nonstatic_field(instanceKlass,               _protection_domain,                            oop)                                   \
@@ -293,6 +319,10 @@
   nonstatic_field(instanceKlass,               _methods_annotations,                          objArrayOop)                           \
   nonstatic_field(instanceKlass,               _methods_parameter_annotations,                objArrayOop)                           \
   nonstatic_field(instanceKlass,               _methods_default_annotations,                  objArrayOop)                           \
+  nonstatic_field(instanceKlass,               _dependencies,                                 nmethodBucket*)                        \
+  nonstatic_field(nmethodBucket,               _nmethod,                                      nmethod*)                              \
+  nonstatic_field(nmethodBucket,               _count,                                        int)                                   \
+  nonstatic_field(nmethodBucket,               _next,                                         nmethodBucket*)                        \
   nonstatic_field(Klass,                       _super_check_offset,                           juint)                                 \
   nonstatic_field(Klass,                       _secondary_super_cache,                        klassOop)                              \
   nonstatic_field(Klass,                       _secondary_supers,                             objArrayOop)                           \
@@ -311,17 +341,26 @@
   nonstatic_field(methodKlass,                 _alloc_size,                                   juint)                                 \
   nonstatic_field(methodDataOopDesc,           _size,                                         int)                                   \
   nonstatic_field(methodDataOopDesc,           _method,                                       methodOop)                             \
+  nonstatic_field(methodDataOopDesc,           _data_size,                                    int)                                   \
+  nonstatic_field(methodDataOopDesc,           _data[0],                                      intptr_t)                              \
+  nonstatic_field(methodDataOopDesc,           _nof_decompiles,                               uint)                                  \
+  nonstatic_field(methodDataOopDesc,           _nof_overflow_recompiles,                      uint)                                  \
+  nonstatic_field(methodDataOopDesc,           _nof_overflow_traps,                           uint)                                  \
+  nonstatic_field(methodDataOopDesc,           _eflags,                                       intx)                                  \
+  nonstatic_field(methodDataOopDesc,           _arg_local,                                    intx)                                  \
+  nonstatic_field(methodDataOopDesc,           _arg_stack,                                    intx)                                  \
+  nonstatic_field(methodDataOopDesc,           _arg_returned,                                 intx)                                  \
   nonstatic_field(methodOopDesc,               _constMethod,                                  constMethodOop)                        \
   nonstatic_field(methodOopDesc,               _constants,                                    constantPoolOop)                       \
-  c2_nonstatic_field(methodOopDesc,            _method_data,                                  methodDataOop)                         \
-  c2_nonstatic_field(methodOopDesc,            _interpreter_invocation_count,                 int)                                   \
+  nonstatic_field(methodOopDesc,               _method_data,                                  methodDataOop)                         \
+  nonstatic_field(methodOopDesc,               _interpreter_invocation_count,                 int)                                   \
   nonstatic_field(methodOopDesc,               _access_flags,                                 AccessFlags)                           \
   nonstatic_field(methodOopDesc,               _vtable_index,                                 int)                                   \
   nonstatic_field(methodOopDesc,               _method_size,                                  u2)                                    \
   nonstatic_field(methodOopDesc,               _max_stack,                                    u2)                                    \
   nonstatic_field(methodOopDesc,               _max_locals,                                   u2)                                    \
   nonstatic_field(methodOopDesc,               _size_of_parameters,                           u2)                                    \
-  c2_nonstatic_field(methodOopDesc,            _interpreter_throwout_count,                   u2)                                    \
+  nonstatic_field(methodOopDesc,               _interpreter_throwout_count,                   u2)                                    \
   nonstatic_field(methodOopDesc,               _number_of_breakpoints,                        u2)                                    \
   nonstatic_field(methodOopDesc,               _invocation_counter,                           InvocationCounter)                     \
   nonstatic_field(methodOopDesc,               _backedge_counter,                             InvocationCounter)                     \
@@ -407,7 +446,7 @@
      static_field(Universe,                    _constantPoolCacheKlassObj,                    klassOop)                              \
      static_field(Universe,                    _compiledICHolderKlassObj,                     klassOop)                              \
      static_field(Universe,                    _systemObjArrayKlassObj,                       klassOop)                              \
-     static_field(Universe,                    _mirrors[0],                                   oop)                                  \
+     static_field(Universe,                    _mirrors[0],                                   oop)                                   \
      static_field(Universe,                    _main_thread_group,                            oop)                                   \
      static_field(Universe,                    _system_thread_group,                          oop)                                   \
      static_field(Universe,                    _the_empty_byte_array,                         typeArrayOop)                          \
@@ -652,9 +691,16 @@
       static_field(SystemDictionary,            WK_KLASS(ThreadGroup_klass),                   klassOop)                             \
       static_field(SystemDictionary,            WK_KLASS(Properties_klass),                    klassOop)                             \
       static_field(SystemDictionary,            WK_KLASS(StringBuffer_klass),                  klassOop)                             \
+      static_field(SystemDictionary,            WK_KLASS(MethodHandle_klass),                  klassOop)                             \
       static_field(SystemDictionary,            _box_klasses[0],                               klassOop)                             \
       static_field(SystemDictionary,            _java_system_loader,                           oop)                                  \
                                                                                                                                      \
+  /*************/                                                                                                                    \
+  /* vmSymbols */                                                                                                                    \
+  /*************/                                                                                                                    \
+                                                                                                                                     \
+      static_field(vmSymbols,                   _symbols[0],                                  Symbol*)                               \
+                                                                                                                                     \
   /*******************/                                                                                                              \
   /* HashtableBucket */                                                                                                              \
   /*******************/                                                                                                              \
@@ -707,6 +753,15 @@
   nonstatic_field(LoaderConstraintEntry,       _max_loaders,                                  int)                                   \
   nonstatic_field(LoaderConstraintEntry,       _loaders,                                      oop*)                                  \
                                                                                                                                      \
+  /*******************/                                                                                                              \
+  /* GrowableArrays  */                                                                                                              \
+  /*******************/                                                                                                              \
+                                                                                                                                     \
+  nonstatic_field(GenericGrowableArray,        _len,                                          int)                                   \
+  nonstatic_field(GenericGrowableArray,        _max,                                          int)                                   \
+  nonstatic_field(GenericGrowableArray,        _arena,                                        Arena*)                                \
+  nonstatic_field(GrowableArray<int>,          _data,                                        int*)                                  \
+                                                                                                                                     \
   /********************************/                                                                                                 \
   /* CodeCache (NOTE: incomplete) */                                                                                                 \
   /********************************/                                                                                                 \
@@ -757,12 +812,19 @@
   nonstatic_field(PcDesc,                      _pc_offset,                                    int)                                   \
   nonstatic_field(PcDesc,                      _scope_decode_offset,                          int)                                   \
   nonstatic_field(PcDesc,                      _obj_decode_offset,                            int)                                   \
-  nonstatic_field(PcDesc,                      _flags,                        PcDesc::PcDescFlags)                                   \
+  nonstatic_field(PcDesc,                      _flags,                                        int)                                   \
                                                                                                                                      \
   /***************************************************/                                                                              \
   /* CodeBlobs (NOTE: incomplete, but only a little) */                                                                              \
   /***************************************************/                                                                              \
                                                                                                                                      \
+  X86_ONLY(nonstatic_field(MethodHandles::RicochetFrame, _sender_pc,                                     address))                   \
+  X86_ONLY(nonstatic_field(MethodHandles::RicochetFrame, _exact_sender_sp,                              intptr_t*))                  \
+  X86_ONLY(nonstatic_field(MethodHandles::RicochetFrame, _sender_link,                                  intptr_t*))                  \
+  X86_ONLY(nonstatic_field(MethodHandles::RicochetFrame, _saved_args_base,                              intptr_t*))                  \
+                                                                                                                                     \
+     static_field(SharedRuntime,               _ricochet_blob,                                RicochetBlob*)                         \
+                                                                                                                                     \
   nonstatic_field(CodeBlob,                    _name,                                         const char*)                           \
   nonstatic_field(CodeBlob,                    _size,                                         int)                                   \
   nonstatic_field(CodeBlob,                    _header_size,                                  int)                                   \
@@ -774,6 +836,8 @@
   nonstatic_field(CodeBlob,                    _frame_size,                                   int)                                   \
   nonstatic_field(CodeBlob,                    _oop_maps,                                     OopMapSet*)                            \
                                                                                                                                      \
+  nonstatic_field(RuntimeStub,                 _caller_must_gc_arguments,                     bool)                                  \
+                                                                                                                                     \
   /**************************************************/                                                                               \
   /* NMethods (NOTE: incomplete, but only a little) */                                                                               \
   /**************************************************/                                                                               \
@@ -786,6 +850,7 @@
   nonstatic_field(nmethod,             _state,                                        unsigned char)                         \
   nonstatic_field(nmethod,             _exception_offset,                             int)                                   \
   nonstatic_field(nmethod,             _deoptimize_offset,                            int)                                   \
+  nonstatic_field(nmethod,             _deoptimize_mh_offset,                         int)                                   \
   nonstatic_field(nmethod,             _orig_pc_offset,                               int)                                   \
   nonstatic_field(nmethod,             _stub_offset,                                  int)                                   \
   nonstatic_field(nmethod,             _consts_offset,                                int)                                   \
@@ -802,8 +867,14 @@
   nonstatic_field(nmethod,             _lock_count,                                   jint)                                  \
   nonstatic_field(nmethod,             _stack_traversal_mark,                         long)                                  \
   nonstatic_field(nmethod,             _compile_id,                                   int)                                   \
+  nonstatic_field(nmethod,             _exception_cache,                              ExceptionCache*)                       \
   nonstatic_field(nmethod,             _marked_for_deoptimization,                    bool)                                  \
                                                                                                                                      \
+  nonstatic_field(RicochetBlob,        _bounce_offset,                                int)                                           \
+  nonstatic_field(RicochetBlob,        _exception_offset,                             int)                                           \
+                                                                                                                                     \
+  unchecked_c2_static_field(Deoptimization,         _trap_reason_name,                   void*)                                         \
+                                                                                                                                     \
   /********************************/                                                                                                 \
   /* JavaCalls (NOTE: incomplete) */                                                                                                 \
   /********************************/                                                                                                 \
@@ -824,6 +895,9 @@
      static_field(Threads,                     _number_of_non_daemon_threads,                 int)                                   \
      static_field(Threads,                     _return_code,                                  int)                                   \
                                                                                                                                      \
+  nonstatic_field(ThreadShadow,                _pending_exception,                            oop)                                   \
+  nonstatic_field(ThreadShadow,                _exception_file,                               const char*)                           \
+  nonstatic_field(ThreadShadow,                _exception_line,                               int)                                   \
    volatile_nonstatic_field(Thread,            _suspend_flags,                                uint32_t)                              \
   nonstatic_field(Thread,                      _active_handles,                               JNIHandleBlock*)                       \
   nonstatic_field(Thread,                      _tlab,                                         ThreadLocalAllocBuffer)                \
@@ -835,10 +909,22 @@
   nonstatic_field(JavaThread,                  _next,                                         JavaThread*)                           \
   nonstatic_field(JavaThread,                  _threadObj,                                    oop)                                   \
   nonstatic_field(JavaThread,                  _anchor,                                       JavaFrameAnchor)                       \
+  nonstatic_field(JavaThread,                  _vm_result,                                    oop)                                   \
+  nonstatic_field(JavaThread,                  _vm_result_2,                                  oop)                                   \
+  nonstatic_field(JavaThread,                  _pending_async_exception,                      oop)                                   \
+  volatile_nonstatic_field(JavaThread,         _exception_oop,                                oop)                                   \
+  volatile_nonstatic_field(JavaThread,         _exception_pc,                                 address)                               \
+  nonstatic_field(JavaThread,                  _is_compiling,                                 bool)                                  \
+  nonstatic_field(JavaThread,                  _special_runtime_exit_condition,               JavaThread::AsyncRequests)             \
+  nonstatic_field(JavaThread,                  _saved_exception_pc,                           address)                               \
    volatile_nonstatic_field(JavaThread,        _thread_state,                                 JavaThreadState)                       \
   nonstatic_field(JavaThread,                  _osthread,                                     OSThread*)                             \
   nonstatic_field(JavaThread,                  _stack_base,                                   address)                               \
   nonstatic_field(JavaThread,                  _stack_size,                                   size_t)                                \
+  nonstatic_field(JavaThread,                  _vframe_array_head,                            vframeArray*)                          \
+  nonstatic_field(JavaThread,                  _vframe_array_last,                            vframeArray*)                          \
+  nonstatic_field(Thread,                      _resource_area,                                ResourceArea*)                         \
+  nonstatic_field(CompilerThread,              _env,                                          ciEnv*)                                \
                                                                                                                                      \
   /************/                                                                                                                     \
   /* OSThread */                                                                                                                     \
@@ -888,7 +974,83 @@
   /* Runtime1 (NOTE: incomplete) */                                                                                                  \
   /*******************************/                                                                                                  \
                                                                                                                                      \
-  unchecked_c1_static_field(Runtime1,          _blobs,                                        sizeof(Runtime1::_blobs)) /* NOTE: no type */ \
+  unchecked_c1_static_field(Runtime1,          _blobs,                                 sizeof(Runtime1::_blobs)) /* NOTE: no type */ \
+                                                                                                                                     \
+  /**************/                                                                                                                   \
+  /* allocation */                                                                                                                   \
+  /**************/                                                                                                                   \
+                                                                                                                                     \
+  nonstatic_field(Chunk, _next, Chunk*)                                                                                              \
+  nonstatic_field(Chunk, _len, const size_t)                                                                                         \
+                                                                                                                                     \
+  nonstatic_field(Arena, _first, Chunk*)                                                                                             \
+  nonstatic_field(Arena, _chunk, Chunk*)                                                                                             \
+  nonstatic_field(Arena, _hwm, char*)                                                                                                \
+  nonstatic_field(Arena, _max, char*)                                                                                                \
+                                                                                                                                     \
+  /************/                                                                                                                     \
+  /* CI */                                                                                                                           \
+  /************/                                                                                                                     \
+                                                                                                                                     \
+ nonstatic_field(ciEnv,               _system_dictionary_modification_counter, int)                                                  \
+ nonstatic_field(ciEnv,               _compiler_data, void*)                                                                         \
+ nonstatic_field(ciEnv,               _failure_reason, const char*)                                                                  \
+ nonstatic_field(ciEnv,               _factory, ciObjectFactory*)                                                                    \
+ nonstatic_field(ciEnv,               _dependencies, Dependencies*)                                                                  \
+ nonstatic_field(ciEnv,               _task, CompileTask*)                                                                           \
+ nonstatic_field(ciEnv,               _arena, Arena*)                                                                                \
+                                                                                                                                     \
+ nonstatic_field(ciObject,    _handle, jobject)                                                                                      \
+ nonstatic_field(ciObject,    _klass, ciKlass*)                                                                                      \
+ nonstatic_field(ciObject,    _ident, uint)                                                                                          \
+                                                                                                                                     \
+ nonstatic_field(ciSymbol,    _ident, uint)                                                                                          \
+ nonstatic_field(ciSymbol,    _symbol, Symbol*)                                                                                      \
+                                                                                                                                     \
+ nonstatic_field(ciType,    _basic_type, BasicType)                                                                                  \
+                                                                                                                                     \
+ nonstatic_field(ciKlass,   _name, ciSymbol*)                                                                                        \
+                                                                                                                                     \
+ nonstatic_field(ciArrayKlass,   _dimension, jint)                                                                                   \
+                                                                                                                                     \
+ nonstatic_field(ciObjArrayKlass, _element_klass, ciKlass*)                                                                          \
+ nonstatic_field(ciObjArrayKlass, _base_element_klass, ciKlass*)                                                                     \
+                                                                                                                                     \
+ nonstatic_field(ciInstanceKlass,   _init_state, instanceKlass::ClassState)                                                          \
+ nonstatic_field(ciInstanceKlass,   _is_shared,  bool)                                                                               \
+                                                                                                                                     \
+ nonstatic_field(ciMethod,     _interpreter_invocation_count, int)                                                                   \
+ nonstatic_field(ciMethod,     _interpreter_throwout_count, int)                                                                     \
+                                                                                                                                     \
+ nonstatic_field(ciMethodData, _data_size, int)                                                                                      \
+ nonstatic_field(ciMethodData, _state, u_char)                                                                                       \
+ nonstatic_field(ciMethodData, _extra_data_size, int)                                                                                \
+ nonstatic_field(ciMethodData, _data, intptr_t*)                                                                                     \
+ nonstatic_field(ciMethodData, _hint_di, int)                                                                                        \
+ nonstatic_field(ciMethodData, _eflags, intx)                                                                                        \
+ nonstatic_field(ciMethodData, _arg_local, intx)                                                                                     \
+ nonstatic_field(ciMethodData, _arg_stack, intx)                                                                                     \
+ nonstatic_field(ciMethodData, _arg_returned, intx)                                                                                  \
+ nonstatic_field(ciMethodData, _current_mileage, int)                                                                                \
+ nonstatic_field(ciMethodData, _orig, methodDataOopDesc)                                                                             \
+                                                                                                                                     \
+ nonstatic_field(ciField,     _holder, ciInstanceKlass*)                                                                             \
+ nonstatic_field(ciField,     _name, ciSymbol*)                                                                                      \
+ nonstatic_field(ciField,     _signature, ciSymbol*)                                                                                 \
+ nonstatic_field(ciField,     _offset, int)                                                                                          \
+ nonstatic_field(ciField,     _is_constant, bool)                                                                                    \
+ nonstatic_field(ciField,     _constant_value, ciConstant)                                                                           \
+                                                                                                                                     \
+ nonstatic_field(ciObjectFactory,     _ci_objects, GrowableArray<ciObject*>*)                                                        \
+ nonstatic_field(ciObjectFactory,     _symbols, GrowableArray<ciSymbol*>*)                                                           \
+ nonstatic_field(ciObjectFactory,     _unloaded_methods, GrowableArray<ciMethod*>*)                                                  \
+                                                                                                                                     \
+ nonstatic_field(ciConstant,     _type, BasicType)                                                                                   \
+ nonstatic_field(ciConstant,     _value._int, jint)                                                                                  \
+ nonstatic_field(ciConstant,     _value._long, jlong)                                                                                \
+ nonstatic_field(ciConstant,     _value._float, jfloat)                                                                              \
+ nonstatic_field(ciConstant,     _value._double, jdouble)                                                                            \
+ nonstatic_field(ciConstant,     _value._object, ciObject*)                                                                          \
                                                                                                                                      \
   /************/                                                                                                                     \
   /* Monitors */                                                                                                                     \
@@ -910,7 +1072,114 @@
   /* Matcher (C2 only) */                                                                                                            \
   /*********************/                                                                                                            \
                                                                                                                                      \
-  unchecked_c2_static_field(Matcher,           _regEncode,                                    sizeof(Matcher::_regEncode)) /* NOTE: no type */ \
+  unchecked_c2_static_field(Matcher,           _regEncode,                          sizeof(Matcher::_regEncode)) /* NOTE: no type */ \
+                                                                                                                                     \
+  c2_nonstatic_field(Node,               _in,                      Node**)                                                           \
+  c2_nonstatic_field(Node,               _out,                     Node**)                                                           \
+  c2_nonstatic_field(Node,               _cnt,                     node_idx_t)                                                       \
+  c2_nonstatic_field(Node,               _max,                     node_idx_t)                                                       \
+  c2_nonstatic_field(Node,               _outcnt,                  node_idx_t)                                                       \
+  c2_nonstatic_field(Node,               _outmax,                  node_idx_t)                                                       \
+  c2_nonstatic_field(Node,               _idx,                     const node_idx_t)                                                 \
+  c2_nonstatic_field(Node,               _class_id,                jushort)                                                          \
+  c2_nonstatic_field(Node,               _flags,                   jushort)                                                          \
+                                                                                                                                     \
+  c2_nonstatic_field(Compile,            _root,                    RootNode*)                                                        \
+  c2_nonstatic_field(Compile,            _unique,                  uint)                                                             \
+  c2_nonstatic_field(Compile,            _entry_bci,               int)                                                              \
+  c2_nonstatic_field(Compile,            _top,                     Node*)                                                            \
+  c2_nonstatic_field(Compile,            _cfg,                     PhaseCFG*)                                                        \
+  c2_nonstatic_field(Compile,            _regalloc,                PhaseRegAlloc*)                                                   \
+  c2_nonstatic_field(Compile,            _method,                  ciMethod*)                                                        \
+  c2_nonstatic_field(Compile,            _compile_id,              const int)                                                        \
+  c2_nonstatic_field(Compile,            _save_argument_registers, const bool)                                                       \
+  c2_nonstatic_field(Compile,            _subsume_loads,           const bool)                                                       \
+  c2_nonstatic_field(Compile,            _do_escape_analysis,      const bool)                                                       \
+  c2_nonstatic_field(Compile,            _ilt,                     InlineTree*)                                                      \
+                                                                                                                                     \
+  c2_nonstatic_field(InlineTree,         _caller_jvms,             JVMState*)                                                        \
+  c2_nonstatic_field(InlineTree,         _method,                  ciMethod*)                                                        \
+  c2_nonstatic_field(InlineTree,         _caller_tree,             InlineTree*)                                                      \
+  c2_nonstatic_field(InlineTree,         _subtrees,                GrowableArray<InlineTree*>)                                       \
+                                                                                                                                     \
+  c2_nonstatic_field(OptoRegPair,        _first,                   short)                                                            \
+  c2_nonstatic_field(OptoRegPair,        _second,                  short)                                                            \
+                                                                                                                                     \
+  c2_nonstatic_field(JVMState,           _caller,                  JVMState*)                                                        \
+  c2_nonstatic_field(JVMState,           _depth,                   uint)                                                             \
+  c2_nonstatic_field(JVMState,           _locoff,                  uint)                                                             \
+  c2_nonstatic_field(JVMState,           _stkoff,                  uint)                                                             \
+  c2_nonstatic_field(JVMState,           _monoff,                  uint)                                                             \
+  c2_nonstatic_field(JVMState,           _scloff,                  uint)                                                             \
+  c2_nonstatic_field(JVMState,           _endoff,                  uint)                                                             \
+  c2_nonstatic_field(JVMState,           _sp,                      uint)                                                             \
+  c2_nonstatic_field(JVMState,           _bci,                     int)                                                              \
+  c2_nonstatic_field(JVMState,           _method,                  ciMethod*)                                                        \
+  c2_nonstatic_field(JVMState,           _map,                     SafePointNode*)                                                   \
+                                                                                                                                     \
+  c2_nonstatic_field(SafePointNode,      _jvms,                    JVMState* const)                                                  \
+                                                                                                                                     \
+  c2_nonstatic_field(MachSafePointNode,  _jvms,                    JVMState*)                                                        \
+  c2_nonstatic_field(MachSafePointNode,  _jvmadj,                  uint)                                                             \
+                                                                                                                                     \
+  c2_nonstatic_field(MachIfNode,         _prob,                    jfloat)                                                           \
+  c2_nonstatic_field(MachIfNode,         _fcnt,                    jfloat)                                                           \
+                                                                                                                                     \
+  c2_nonstatic_field(CallNode,           _entry_point,             address)                                                          \
+                                                                                                                                     \
+  c2_nonstatic_field(CallJavaNode,       _method,                  ciMethod*)                                                        \
+                                                                                                                                     \
+  c2_nonstatic_field(CallRuntimeNode,    _name,                    const char*)                                                      \
+                                                                                                                                     \
+  c2_nonstatic_field(CallStaticJavaNode, _name,                    const char*)                                                      \
+                                                                                                                                     \
+  c2_nonstatic_field(MachCallJavaNode,   _method,                  ciMethod*)                                                        \
+  c2_nonstatic_field(MachCallJavaNode,   _bci,                     int)                                                              \
+                                                                                                                                     \
+  c2_nonstatic_field(MachCallStaticJavaNode, _name,                const char*)                                                      \
+                                                                                                                                     \
+  c2_nonstatic_field(MachCallRuntimeNode,  _name,                  const char*)                                                      \
+                                                                                                                                     \
+  c2_nonstatic_field(PhaseCFG,           _num_blocks,              uint)                                                             \
+  c2_nonstatic_field(PhaseCFG,           _blocks,                  Block_List)                                                       \
+  c2_nonstatic_field(PhaseCFG,           _bbs,                     Block_Array)                                                      \
+  c2_nonstatic_field(PhaseCFG,           _broot,                   Block*)                                                           \
+                                                                                                                                     \
+  c2_nonstatic_field(PhaseRegAlloc,      _node_regs,               OptoRegPair*)                                                     \
+  c2_nonstatic_field(PhaseRegAlloc,      _node_regs_max_index,     uint)                                                             \
+  c2_nonstatic_field(PhaseRegAlloc,      _framesize,               uint)                                                             \
+  c2_nonstatic_field(PhaseRegAlloc,      _max_reg,                 OptoReg::Name)                                                    \
+                                                                                                                                     \
+  c2_nonstatic_field(PhaseChaitin,       _trip_cnt,                int)                                                              \
+  c2_nonstatic_field(PhaseChaitin,       _alternate,               int)                                                              \
+  c2_nonstatic_field(PhaseChaitin,       _lo_degree,               uint)                                                             \
+  c2_nonstatic_field(PhaseChaitin,       _lo_stk_degree,           uint)                                                             \
+  c2_nonstatic_field(PhaseChaitin,       _hi_degree,               uint)                                                             \
+  c2_nonstatic_field(PhaseChaitin,       _simplified,              uint)                                                             \
+  c2_nonstatic_field(PhaseChaitin,       _maxlrg,                  uint)                                                             \
+                                                                                                                                     \
+  c2_nonstatic_field(Block,              _nodes,                   Node_List)                                                        \
+  c2_nonstatic_field(Block,              _succs,                   Block_Array)                                                      \
+  c2_nonstatic_field(Block,              _num_succs,               uint)                                                             \
+  c2_nonstatic_field(Block,              _pre_order,               uint)                                                             \
+  c2_nonstatic_field(Block,              _dom_depth,               uint)                                                             \
+  c2_nonstatic_field(Block,              _idom,                    Block*)                                                           \
+  c2_nonstatic_field(Block,              _freq,                    jfloat)                                                           \
+                                                                                                                                     \
+  c2_nonstatic_field(CFGElement,         _freq,                    jfloat)                                                           \
+                                                                                                                                     \
+  c2_nonstatic_field(Block_List,         _cnt,                     uint)                                                             \
+                                                                                                                                     \
+  c2_nonstatic_field(Block_Array,        _size,                    uint)                                                             \
+  c2_nonstatic_field(Block_Array,        _blocks,                  Block**)                                                          \
+  c2_nonstatic_field(Block_Array,        _arena,                   Arena*)                                                           \
+                                                                                                                                     \
+  c2_nonstatic_field(Node_List,          _cnt,                     uint)                                                             \
+                                                                                                                                     \
+  c2_nonstatic_field(Node_Array,         _max,                     uint)                                                             \
+  c2_nonstatic_field(Node_Array,         _nodes,                   Node**)                                                           \
+  c2_nonstatic_field(Node_Array,         _a,                       Arena*)                                                           \
+                                                                                                                                     \
                                                                                                                                      \
   /*********************/                                                                                                            \
   /* -XX flags         */                                                                                                            \
@@ -920,6 +1189,7 @@
   nonstatic_field(Flag,                        name,                                          const char*)                           \
   unchecked_nonstatic_field(Flag,              addr,                                          sizeof(void*)) /* NOTE: no type */     \
   nonstatic_field(Flag,                        kind,                                          const char*)                           \
+  nonstatic_field(Flag,                        origin,                                        FlagValueOrigin)                       \
   static_field(Flag,                           flags,                                         Flag*)                                 \
   static_field(Flag,                           numFlags,                                      size_t)                                \
                                                                                                                                      \
@@ -937,7 +1207,14 @@
   nonstatic_field(JDK_Version,                 _partially_initialized,                        bool)                                  \
   nonstatic_field(JDK_Version,                 _major,                                        unsigned char)                         \
                                                                                                                                      \
+  /*************************/                                                                                                        \
+  /* JVMTI */                                                                                                                        \
+  /*************************/                                                                                                        \
                                                                                                                                      \
+  static_field(JvmtiExport,                     _can_access_local_variables,                  bool)                                  \
+  static_field(JvmtiExport,                     _can_hotswap_or_post_breakpoint,              bool)                                  \
+  static_field(JvmtiExport,                     _can_post_on_exceptions,                      bool)                                  \
+  static_field(JvmtiExport,                     _can_walk_any_space,                          bool)                                  \
                                                                                                                                      \
   /*************/                                                                                                                    \
   /* Arguments */                                                                                                                    \
@@ -953,20 +1230,36 @@
   /* java_lang_Class fields        */                                                                                                \
   /*********************************/                                                                                                \
                                                                                                                                      \
-  static_field(java_lang_Class,                klass_offset,                                  int)                                   \
-  static_field(java_lang_Class,                resolved_constructor_offset,                   int)                                   \
-  static_field(java_lang_Class,                array_klass_offset,                            int)                                   \
-  static_field(java_lang_Class,                oop_size_offset,                               int)                                   \
-  static_field(java_lang_Class,                static_oop_field_count_offset,                 int)                                   \
+  static_field(java_lang_Class,                _klass_offset,                                 int)                                   \
+  static_field(java_lang_Class,                _resolved_constructor_offset,                  int)                                   \
+  static_field(java_lang_Class,                _array_klass_offset,                           int)                                   \
+  static_field(java_lang_Class,                _oop_size_offset,                              int)                                   \
+  static_field(java_lang_Class,                _static_oop_field_count_offset,                int)                                   \
                                                                                                                                      \
   /************************/                                                                                                         \
   /* Miscellaneous fields */                                                                                                         \
   /************************/                                                                                                         \
                                                                                                                                      \
-  nonstatic_field(AccessFlags,                 _flags,                                        jint)                                  \
-  nonstatic_field(elapsedTimer,                _counter,                                      jlong)                                 \
-  nonstatic_field(elapsedTimer,                _active,                                       bool)                                  \
-  nonstatic_field(InvocationCounter,           _counter,                                      unsigned int)
+  nonstatic_field(CompileTask,                 _method,                                      jobject)                                \
+  nonstatic_field(CompileTask,                 _osr_bci,                                     int)                                    \
+  nonstatic_field(CompileTask,                 _comp_level,                                  int)                                    \
+  nonstatic_field(CompileTask,                 _compile_id,                                  uint)                                   \
+  nonstatic_field(CompileTask,                 _next,                                        CompileTask*)                           \
+  nonstatic_field(CompileTask,                 _prev,                                        CompileTask*)                           \
+                                                                                                                                     \
+  nonstatic_field(vframeArray,                 _next,                                        vframeArray*)                           \
+  nonstatic_field(vframeArray,                 _original,                                    frame)                                  \
+  nonstatic_field(vframeArray,                 _caller,                                      frame)                                  \
+  nonstatic_field(vframeArray,                 _frames,                                      int)                                    \
+                                                                                                                                     \
+  nonstatic_field(vframeArrayElement,          _frame,                                       frame)                                  \
+  nonstatic_field(vframeArrayElement,          _bci,                                         int)                                    \
+  nonstatic_field(vframeArrayElement,          _method,                                      methodOop)                              \
+                                                                                                                                     \
+  nonstatic_field(AccessFlags,                 _flags,                                       jint)                                   \
+  nonstatic_field(elapsedTimer,                _counter,                                     jlong)                                  \
+  nonstatic_field(elapsedTimer,                _active,                                      bool)                                   \
+  nonstatic_field(InvocationCounter,           _counter,                                     unsigned int)
 
   /* NOTE that we do not use the last_entry() macro here; it is used  */
   /* in vmStructs_<os>_<cpu>.hpp's VM_STRUCTS_OS_CPU macro (and must  */
@@ -1046,12 +1339,16 @@
   /* and are valid types for Fields.                                   */ \
   /*********************************************************************/ \
   declare_integer_type(bool)                                              \
+  declare_integer_type(short)                                             \
   declare_integer_type(int)                                               \
   declare_integer_type(long)                                              \
   declare_integer_type(char)                                              \
   declare_unsigned_integer_type(unsigned char)                            \
+  declare_unsigned_integer_type(u_char)                                   \
   declare_unsigned_integer_type(unsigned int)                             \
+  declare_unsigned_integer_type(uint)                                     \
   declare_unsigned_integer_type(unsigned short)                           \
+  declare_unsigned_integer_type(jushort)                                  \
   declare_unsigned_integer_type(unsigned long)                            \
   /* The compiler thinks this is a different type than */                 \
   /* unsigned short on Win32 */                                           \
@@ -1065,7 +1362,6 @@
   declare_toplevel_type(int*)                                             \
   declare_toplevel_type(char*)                                            \
   declare_toplevel_type(char**)                                           \
-  declare_toplevel_type(const char*)                                      \
   declare_toplevel_type(u_char*)                                          \
   declare_toplevel_type(unsigned char*)                                   \
                                                                           \
@@ -1077,14 +1373,12 @@
                                                                           \
   declare_unsigned_integer_type(size_t)                                   \
   declare_integer_type(ssize_t)                                           \
-  declare_unsigned_integer_type(const size_t)                             \
   declare_integer_type(intx)                                              \
   declare_integer_type(intptr_t)                                          \
   declare_unsigned_integer_type(uintx)                                    \
   declare_unsigned_integer_type(uintptr_t)                                \
   declare_unsigned_integer_type(uint32_t)                                 \
   declare_unsigned_integer_type(uint64_t)                                 \
-  declare_integer_type(const int)                                         \
                                                                           \
   /*******************************************************************************/ \
   /* OopDesc and Klass hierarchies (NOTE: missing methodDataOop-related classes) */ \
@@ -1110,8 +1404,8 @@
            declare_type(klassKlass, Klass)                                \
            declare_type(klassOopDesc, oopDesc)                            \
            declare_type(markOopDesc, oopDesc)                             \
-   declare_type(methodDataKlass, Klass)                           \
-   declare_type(methodDataOopDesc, oopDesc)                       \
+   declare_type(methodDataKlass, Klass)                                   \
+   declare_type(methodDataOopDesc, oopDesc)                               \
            declare_type(methodKlass, Klass)                               \
            declare_type(constMethodKlass, Klass)                          \
            declare_type(methodOopDesc, oopDesc)                           \
@@ -1125,6 +1419,8 @@
            declare_toplevel_type(Symbol)                                  \
            declare_toplevel_type(Symbol*)                                 \
                                                                           \
+  declare_toplevel_type(nmethodBucket)                                    \
+                                                                          \
   /********/                                                              \
   /* Oops */                                                              \
   /********/                                                              \
@@ -1192,7 +1488,6 @@
   declare_toplevel_type(GenerationSpec)                                   \
   declare_toplevel_type(HeapWord)                                         \
   declare_toplevel_type(MemRegion)                                        \
-  declare_toplevel_type(const MemRegion)                                  \
   declare_toplevel_type(PermanentGenerationSpec)                          \
   declare_toplevel_type(ThreadLocalAllocBuffer)                           \
   declare_toplevel_type(VirtualSpace)                                     \
@@ -1251,8 +1546,15 @@
     declare_type(LoaderConstraintEntry, HashtableEntry<klassOop>)         \
   declare_toplevel_type(HashtableBucket)                                  \
   declare_toplevel_type(SystemDictionary)                                 \
+  declare_toplevel_type(vmSymbols)                                        \
   declare_toplevel_type(ProtectionDomainEntry)                            \
                                                                           \
+  declare_toplevel_type(GenericGrowableArray)                             \
+  declare_toplevel_type(GrowableArray<int>)                               \
+  declare_toplevel_type(Arena)                                            \
+    declare_type(ResourceArea, Arena)                                     \
+  declare_toplevel_type(Chunk)                                            \
+                                                                          \
   /***********************************************************/           \
   /* Thread hierarchy (needed for run-time type information) */           \
   /***********************************************************/           \
@@ -1265,7 +1567,7 @@
            declare_type(JavaThread, Thread)                               \
            declare_type(JvmtiAgentThread, JavaThread)                     \
            declare_type(ServiceThread, JavaThread)                        \
-  declare_type(CompilerThread, JavaThread)                        \
+  declare_type(CompilerThread, JavaThread)                                \
   declare_toplevel_type(OSThread)                                         \
   declare_toplevel_type(JavaFrameAnchor)                                  \
                                                                           \
@@ -1310,24 +1612,32 @@
   /* CodeBlob hierarchy (needed for run-time type information) */         \
   /*************************************************************/         \
                                                                           \
+  declare_toplevel_type(SharedRuntime)                                    \
+  X86_ONLY(declare_toplevel_type(MethodHandles::RicochetFrame))           \
+                                                                          \
   declare_toplevel_type(CodeBlob)                                         \
-  declare_type(BufferBlob,            CodeBlob)                           \
-  declare_type(AdapterBlob,           BufferBlob)                         \
-  declare_type(nmethod,               CodeBlob)                           \
-  declare_type(RuntimeStub,           CodeBlob)                           \
-  declare_type(SingletonBlob,         CodeBlob)                           \
-  declare_type(SafepointBlob,         SingletonBlob)                      \
-  declare_type(DeoptimizationBlob,    SingletonBlob)                      \
-  declare_type(RicochetBlob,          SingletonBlob)                      \
-  declare_c2_type(ExceptionBlob,      SingletonBlob)                      \
-  declare_c2_type(UncommonTrapBlob,   CodeBlob)                           \
+  declare_type(BufferBlob,               CodeBlob)                        \
+  declare_type(AdapterBlob,              BufferBlob)                      \
+  declare_type(MethodHandlesAdapterBlob, BufferBlob)                      \
+  declare_type(nmethod,                  CodeBlob)                        \
+  declare_type(RuntimeStub,              CodeBlob)                        \
+  declare_type(SingletonBlob,            CodeBlob)                        \
+  declare_type(SafepointBlob,            SingletonBlob)                   \
+  declare_type(DeoptimizationBlob,       SingletonBlob)                   \
+  declare_type(RicochetBlob,             SingletonBlob)                   \
+  declare_c2_type(ExceptionBlob,         SingletonBlob)                   \
+  declare_c2_type(UncommonTrapBlob,      CodeBlob)                        \
                                                                           \
   /***************************************/                               \
   /* PcDesc and other compiled code info */                               \
   /***************************************/                               \
                                                                           \
   declare_toplevel_type(PcDesc)                                           \
-  declare_integer_type(PcDesc::PcDescFlags)                               \
+  declare_toplevel_type(ExceptionCache)                                   \
+  declare_toplevel_type(PcDescCache)                                      \
+  declare_toplevel_type(Dependencies)                                     \
+  declare_toplevel_type(CompileTask)                                      \
+  declare_toplevel_type(Deoptimization)                                   \
                                                                           \
   /************************/                                              \
   /* OopMap and OopMapSet */                                              \
@@ -1354,6 +1664,7 @@
                                                                           \
   declare_toplevel_type(JNIHandles)                                       \
   declare_toplevel_type(JNIHandleBlock)                                   \
+  declare_toplevel_type(jobject)                                          \
                                                                           \
   /**********************/                                                \
   /* Runtime1 (C1 only) */                                                \
@@ -1374,9 +1685,349 @@
   /* Matcher (C2 only) */                                                 \
   /*********************/                                                 \
                                                                           \
-  /* NOTE: this is not really a toplevel type, but we only need */        \
-  /* this one -- FIXME later if necessary */                              \
   declare_c2_toplevel_type(Matcher)                                       \
+  declare_c2_toplevel_type(Compile)                                       \
+  declare_c2_toplevel_type(InlineTree)                                    \
+  declare_c2_toplevel_type(OptoRegPair)                                   \
+  declare_c2_toplevel_type(JVMState)                                      \
+  declare_c2_toplevel_type(Phase)                                         \
+    declare_c2_type(PhaseCFG, Phase)                                      \
+    declare_c2_type(PhaseRegAlloc, Phase)                                 \
+    declare_c2_type(PhaseChaitin, PhaseRegAlloc)                          \
+  declare_c2_toplevel_type(CFGElement)                                    \
+    declare_c2_type(Block, CFGElement)                                    \
+  declare_c2_toplevel_type(Block_Array)                                   \
+    declare_c2_type(Block_List, Block_Array)                              \
+  declare_c2_toplevel_type(Node_Array)                                    \
+  declare_c2_type(Node_List, Node_Array)                                  \
+  declare_c2_type(Unique_Node_List, Node_List)                            \
+  declare_c2_toplevel_type(Node)                                          \
+  declare_c2_type(AddNode, Node)                                          \
+  declare_c2_type(AddINode, AddNode)                                      \
+  declare_c2_type(AddLNode, AddNode)                                      \
+  declare_c2_type(AddFNode, AddNode)                                      \
+  declare_c2_type(AddDNode, AddNode)                                      \
+  declare_c2_type(AddPNode, Node)                                         \
+  declare_c2_type(OrINode, AddNode)                                       \
+  declare_c2_type(OrLNode, AddNode)                                       \
+  declare_c2_type(XorINode, AddNode)                                      \
+  declare_c2_type(XorLNode, AddNode)                                      \
+  declare_c2_type(MaxNode, AddNode)                                       \
+  declare_c2_type(MaxINode, MaxNode)                                      \
+  declare_c2_type(MinINode, MaxNode)                                      \
+  declare_c2_type(StartNode, MultiNode)                                   \
+  declare_c2_type(StartOSRNode, StartNode)                                \
+  declare_c2_type(ParmNode, ProjNode)                                     \
+  declare_c2_type(ReturnNode, Node)                                       \
+  declare_c2_type(RethrowNode, Node)                                      \
+  declare_c2_type(TailCallNode, ReturnNode)                               \
+  declare_c2_type(TailJumpNode, ReturnNode)                               \
+  declare_c2_type(SafePointNode, MultiNode)                               \
+  declare_c2_type(CallNode, SafePointNode)                                \
+  declare_c2_type(CallJavaNode, CallNode)                                 \
+  declare_c2_type(CallStaticJavaNode, CallJavaNode)                       \
+  declare_c2_type(CallDynamicJavaNode, CallJavaNode)                      \
+  declare_c2_type(CallRuntimeNode, CallNode)                              \
+  declare_c2_type(CallLeafNode, CallRuntimeNode)                          \
+  declare_c2_type(CallLeafNoFPNode, CallLeafNode)                         \
+  declare_c2_type(AllocateNode, CallNode)                                 \
+  declare_c2_type(AllocateArrayNode, AllocateNode)                        \
+  declare_c2_type(LockNode, AbstractLockNode)                             \
+  declare_c2_type(UnlockNode, AbstractLockNode)                           \
+  declare_c2_type(FastLockNode, CmpNode)                                  \
+  declare_c2_type(FastUnlockNode, CmpNode)                                \
+  declare_c2_type(RegionNode, Node)                                       \
+  declare_c2_type(JProjNode, ProjNode)                                    \
+  declare_c2_type(PhiNode, TypeNode)                                      \
+  declare_c2_type(GotoNode, Node)                                         \
+  declare_c2_type(CProjNode, ProjNode)                                    \
+  declare_c2_type(MultiBranchNode, MultiNode)                             \
+  declare_c2_type(IfNode, MultiBranchNode)                                \
+  declare_c2_type(IfTrueNode, CProjNode)                                  \
+  declare_c2_type(IfFalseNode, CProjNode)                                 \
+  declare_c2_type(PCTableNode, MultiBranchNode)                           \
+  declare_c2_type(JumpNode, PCTableNode)                                  \
+  declare_c2_type(JumpProjNode, JProjNode)                                \
+  declare_c2_type(CatchNode, PCTableNode)                                 \
+  declare_c2_type(CatchProjNode, CProjNode)                               \
+  declare_c2_type(CreateExNode, TypeNode)                                 \
+  declare_c2_type(ClearArrayNode, Node)                                   \
+  declare_c2_type(NeverBranchNode, MultiBranchNode)                       \
+  declare_c2_type(ConNode, TypeNode)                                      \
+  declare_c2_type(ConINode, ConNode)                                      \
+  declare_c2_type(ConPNode, ConNode)                                      \
+  declare_c2_type(ConNNode, ConNode)                                      \
+  declare_c2_type(ConLNode, ConNode)                                      \
+  declare_c2_type(ConFNode, ConNode)                                      \
+  declare_c2_type(ConDNode, ConNode)                                      \
+  declare_c2_type(BinaryNode, Node)                                       \
+  declare_c2_type(CMoveNode, TypeNode)                                    \
+  declare_c2_type(CMoveDNode, CMoveNode)                                  \
+  declare_c2_type(CMoveFNode, CMoveNode)                                  \
+  declare_c2_type(CMoveINode, CMoveNode)                                  \
+  declare_c2_type(CMoveLNode, CMoveNode)                                  \
+  declare_c2_type(CMovePNode, CMoveNode)                                  \
+  declare_c2_type(CMoveNNode, CMoveNode)                                  \
+  declare_c2_type(EncodePNode, TypeNode)                                  \
+  declare_c2_type(DecodeNNode, TypeNode)                                  \
+  declare_c2_type(ConstraintCastNode, TypeNode)                           \
+  declare_c2_type(CastIINode, ConstraintCastNode)                         \
+  declare_c2_type(CastPPNode, ConstraintCastNode)                         \
+  declare_c2_type(CheckCastPPNode, TypeNode)                              \
+  declare_c2_type(Conv2BNode, Node)                                       \
+  declare_c2_type(ConvD2FNode, Node)                                      \
+  declare_c2_type(ConvD2INode, Node)                                      \
+  declare_c2_type(ConvD2LNode, Node)                                      \
+  declare_c2_type(ConvF2DNode, Node)                                      \
+  declare_c2_type(ConvF2INode, Node)                                      \
+  declare_c2_type(ConvF2LNode, Node)                                      \
+  declare_c2_type(ConvI2DNode, Node)                                      \
+  declare_c2_type(ConvI2FNode, Node)                                      \
+  declare_c2_type(ConvI2LNode, TypeNode)                                  \
+  declare_c2_type(ConvL2DNode, Node)                                      \
+  declare_c2_type(ConvL2FNode, Node)                                      \
+  declare_c2_type(ConvL2INode, Node)                                      \
+  declare_c2_type(CastX2PNode, Node)                                      \
+  declare_c2_type(CastP2XNode, Node)                                      \
+  declare_c2_type(MemBarNode, MultiNode)                                  \
+  declare_c2_type(MemBarAcquireNode, MemBarNode)                          \
+  declare_c2_type(MemBarReleaseNode, MemBarNode)                          \
+  declare_c2_type(MemBarVolatileNode, MemBarNode)                         \
+  declare_c2_type(MemBarCPUOrderNode, MemBarNode)                         \
+  declare_c2_type(InitializeNode, MemBarNode)                             \
+  declare_c2_type(ThreadLocalNode, Node)                                  \
+  declare_c2_type(Opaque1Node, Node)                                      \
+  declare_c2_type(Opaque2Node, Node)                                      \
+  declare_c2_type(PartialSubtypeCheckNode, Node)                          \
+  declare_c2_type(MoveI2FNode, Node)                                      \
+  declare_c2_type(MoveL2DNode, Node)                                      \
+  declare_c2_type(MoveF2INode, Node)                                      \
+  declare_c2_type(MoveD2LNode, Node)                                      \
+  declare_c2_type(DivINode, Node)                                         \
+  declare_c2_type(DivLNode, Node)                                         \
+  declare_c2_type(DivFNode, Node)                                         \
+  declare_c2_type(DivDNode, Node)                                         \
+  declare_c2_type(ModINode, Node)                                         \
+  declare_c2_type(ModLNode, Node)                                         \
+  declare_c2_type(ModFNode, Node)                                         \
+  declare_c2_type(ModDNode, Node)                                         \
+  declare_c2_type(DivModNode, MultiNode)                                  \
+  declare_c2_type(DivModINode, DivModNode)                                \
+  declare_c2_type(DivModLNode, DivModNode)                                \
+  declare_c2_type(BoxLockNode, Node)                                      \
+  declare_c2_type(LoopNode, RegionNode)                                   \
+  declare_c2_type(CountedLoopNode, LoopNode)                              \
+  declare_c2_type(CountedLoopEndNode, IfNode)                             \
+  declare_c2_type(MachNode, Node)                                         \
+  declare_c2_type(MachIdealNode, MachNode)                                \
+  declare_c2_type(MachTypeNode, MachNode)                                 \
+  declare_c2_type(MachBreakpointNode, MachIdealNode)                      \
+  declare_c2_type(MachUEPNode, MachIdealNode)                             \
+  declare_c2_type(MachPrologNode, MachIdealNode)                          \
+  declare_c2_type(MachEpilogNode, MachIdealNode)                          \
+  declare_c2_type(MachNopNode, MachIdealNode)                             \
+  declare_c2_type(MachSpillCopyNode, MachIdealNode)                       \
+  declare_c2_type(MachNullCheckNode, MachIdealNode)                       \
+  declare_c2_type(MachProjNode, ProjNode)                                 \
+  declare_c2_type(MachIfNode, MachNode)                                   \
+  declare_c2_type(MachFastLockNode, MachNode)                             \
+  declare_c2_type(MachReturnNode, MachNode)                               \
+  declare_c2_type(MachSafePointNode, MachReturnNode)                      \
+  declare_c2_type(MachCallNode, MachSafePointNode)                        \
+  declare_c2_type(MachCallJavaNode, MachCallNode)                         \
+  declare_c2_type(MachCallStaticJavaNode, MachCallJavaNode)               \
+  declare_c2_type(MachCallDynamicJavaNode, MachCallJavaNode)              \
+  declare_c2_type(MachCallRuntimeNode, MachCallNode)                      \
+  declare_c2_type(MachHaltNode, MachReturnNode)                           \
+  declare_c2_type(MachTempNode, MachNode)                                 \
+  declare_c2_type(MemNode, Node)                                          \
+  declare_c2_type(MergeMemNode, Node)                                     \
+  declare_c2_type(LoadNode, MemNode)                                      \
+  declare_c2_type(LoadBNode, LoadNode)                                    \
+  declare_c2_type(LoadUSNode, LoadNode)                                   \
+  declare_c2_type(LoadINode, LoadNode)                                    \
+  declare_c2_type(LoadRangeNode, LoadINode)                               \
+  declare_c2_type(LoadLNode, LoadNode)                                    \
+  declare_c2_type(LoadL_unalignedNode, LoadLNode)                         \
+  declare_c2_type(LoadFNode, LoadNode)                                    \
+  declare_c2_type(LoadDNode, LoadNode)                                    \
+  declare_c2_type(LoadD_unalignedNode, LoadDNode)                         \
+  declare_c2_type(LoadPNode, LoadNode)                                    \
+  declare_c2_type(LoadNNode, LoadNode)                                    \
+  declare_c2_type(LoadKlassNode, LoadPNode)                               \
+  declare_c2_type(LoadNKlassNode, LoadNNode)                              \
+  declare_c2_type(LoadSNode, LoadNode)                                    \
+  declare_c2_type(StoreNode, MemNode)                                     \
+  declare_c2_type(StoreBNode, StoreNode)                                  \
+  declare_c2_type(StoreCNode, StoreNode)                                  \
+  declare_c2_type(StoreINode, StoreNode)                                  \
+  declare_c2_type(StoreLNode, StoreNode)                                  \
+  declare_c2_type(StoreFNode, StoreNode)                                  \
+  declare_c2_type(StoreDNode, StoreNode)                                  \
+  declare_c2_type(StorePNode, StoreNode)                                  \
+  declare_c2_type(StoreNNode, StoreNode)                                  \
+  declare_c2_type(StoreCMNode, StoreNode)                                 \
+  declare_c2_type(LoadPLockedNode, LoadPNode)                             \
+  declare_c2_type(LoadLLockedNode, LoadLNode)                             \
+  declare_c2_type(SCMemProjNode, ProjNode)                                \
+  declare_c2_type(LoadStoreNode, Node)                                    \
+  declare_c2_type(StorePConditionalNode, LoadStoreNode)                   \
+  declare_c2_type(StoreLConditionalNode, LoadStoreNode)                   \
+  declare_c2_type(CompareAndSwapLNode, LoadStoreNode)                     \
+  declare_c2_type(CompareAndSwapINode, LoadStoreNode)                     \
+  declare_c2_type(CompareAndSwapPNode, LoadStoreNode)                     \
+  declare_c2_type(CompareAndSwapNNode, LoadStoreNode)                     \
+  declare_c2_type(PrefetchReadNode, Node)                                 \
+  declare_c2_type(PrefetchWriteNode, Node)                                \
+  declare_c2_type(MulNode, Node)                                          \
+  declare_c2_type(MulINode, MulNode)                                      \
+  declare_c2_type(MulLNode, MulNode)                                      \
+  declare_c2_type(MulFNode, MulNode)                                      \
+  declare_c2_type(MulDNode, MulNode)                                      \
+  declare_c2_type(MulHiLNode, Node)                                       \
+  declare_c2_type(AndINode, MulINode)                                     \
+  declare_c2_type(AndLNode, MulLNode)                                     \
+  declare_c2_type(LShiftINode, Node)                                      \
+  declare_c2_type(LShiftLNode, Node)                                      \
+  declare_c2_type(RShiftINode, Node)                                      \
+  declare_c2_type(RShiftLNode, Node)                                      \
+  declare_c2_type(URShiftINode, Node)                                     \
+  declare_c2_type(URShiftLNode, Node)                                     \
+  declare_c2_type(MultiNode, Node)                                        \
+  declare_c2_type(ProjNode, Node)                                         \
+  declare_c2_type(TypeNode, Node)                                         \
+  declare_c2_type(NodeHash, StackObj)                                     \
+  declare_c2_type(RootNode, LoopNode)                                     \
+  declare_c2_type(HaltNode, Node)                                         \
+  declare_c2_type(SubNode, Node)                                          \
+  declare_c2_type(SubINode, SubNode)                                      \
+  declare_c2_type(SubLNode, SubNode)                                      \
+  declare_c2_type(SubFPNode, SubNode)                                     \
+  declare_c2_type(SubFNode, SubFPNode)                                    \
+  declare_c2_type(SubDNode, SubFPNode)                                    \
+  declare_c2_type(CmpNode, SubNode)                                       \
+  declare_c2_type(CmpINode, CmpNode)                                      \
+  declare_c2_type(CmpUNode, CmpNode)                                      \
+  declare_c2_type(CmpPNode, CmpNode)                                      \
+  declare_c2_type(CmpNNode, CmpNode)                                      \
+  declare_c2_type(CmpLNode, CmpNode)                                      \
+  declare_c2_type(CmpL3Node, CmpLNode)                                    \
+  declare_c2_type(CmpFNode, CmpNode)                                      \
+  declare_c2_type(CmpF3Node, CmpFNode)                                    \
+  declare_c2_type(CmpDNode, CmpNode)                                      \
+  declare_c2_type(CmpD3Node, CmpDNode)                                    \
+  declare_c2_type(BoolNode, Node)                                         \
+  declare_c2_type(AbsNode, Node)                                          \
+  declare_c2_type(AbsINode, AbsNode)                                      \
+  declare_c2_type(AbsFNode, AbsNode)                                      \
+  declare_c2_type(AbsDNode, AbsNode)                                      \
+  declare_c2_type(CmpLTMaskNode, Node)                                    \
+  declare_c2_type(NegNode, Node)                                          \
+  declare_c2_type(NegFNode, NegNode)                                      \
+  declare_c2_type(NegDNode, NegNode)                                      \
+  declare_c2_type(CosDNode, Node)                                         \
+  declare_c2_type(SinDNode, Node)                                         \
+  declare_c2_type(TanDNode, Node)                                         \
+  declare_c2_type(AtanDNode, Node)                                        \
+  declare_c2_type(SqrtDNode, Node)                                        \
+  declare_c2_type(ExpDNode, Node)                                         \
+  declare_c2_type(LogDNode, Node)                                         \
+  declare_c2_type(Log10DNode, Node)                                       \
+  declare_c2_type(PowDNode, Node)                                         \
+  declare_c2_type(ReverseBytesINode, Node)                                \
+  declare_c2_type(ReverseBytesLNode, Node)                                \
+  declare_c2_type(VectorNode, Node)                                       \
+  declare_c2_type(AddVBNode, VectorNode)                                  \
+  declare_c2_type(AddVCNode, VectorNode)                                  \
+  declare_c2_type(AddVSNode, VectorNode)                                  \
+  declare_c2_type(AddVINode, VectorNode)                                  \
+  declare_c2_type(AddVLNode, VectorNode)                                  \
+  declare_c2_type(AddVFNode, VectorNode)                                  \
+  declare_c2_type(AddVDNode, VectorNode)                                  \
+  declare_c2_type(SubVBNode, VectorNode)                                  \
+  declare_c2_type(SubVCNode, VectorNode)                                  \
+  declare_c2_type(SubVSNode, VectorNode)                                  \
+  declare_c2_type(SubVINode, VectorNode)                                  \
+  declare_c2_type(SubVLNode, VectorNode)                                  \
+  declare_c2_type(SubVFNode, VectorNode)                                  \
+  declare_c2_type(SubVDNode, VectorNode)                                  \
+  declare_c2_type(MulVFNode, VectorNode)                                  \
+  declare_c2_type(MulVDNode, VectorNode)                                  \
+  declare_c2_type(DivVFNode, VectorNode)                                  \
+  declare_c2_type(DivVDNode, VectorNode)                                  \
+  declare_c2_type(LShiftVBNode, VectorNode)                               \
+  declare_c2_type(LShiftVCNode, VectorNode)                               \
+  declare_c2_type(LShiftVSNode, VectorNode)                               \
+  declare_c2_type(LShiftVINode, VectorNode)                               \
+  declare_c2_type(URShiftVBNode, VectorNode)                              \
+  declare_c2_type(URShiftVCNode, VectorNode)                              \
+  declare_c2_type(URShiftVSNode, VectorNode)                              \
+  declare_c2_type(URShiftVINode, VectorNode)                              \
+  declare_c2_type(AndVNode, VectorNode)                                   \
+  declare_c2_type(OrVNode, VectorNode)                                    \
+  declare_c2_type(XorVNode, VectorNode)                                   \
+  declare_c2_type(VectorLoadNode, LoadNode)                               \
+  declare_c2_type(Load16BNode, VectorLoadNode)                            \
+  declare_c2_type(Load8BNode, VectorLoadNode)                             \
+  declare_c2_type(Load4BNode, VectorLoadNode)                             \
+  declare_c2_type(Load8CNode, VectorLoadNode)                             \
+  declare_c2_type(Load4CNode, VectorLoadNode)                             \
+  declare_c2_type(Load2CNode, VectorLoadNode)                             \
+  declare_c2_type(Load8SNode, VectorLoadNode)                             \
+  declare_c2_type(Load4SNode, VectorLoadNode)                             \
+  declare_c2_type(Load2SNode, VectorLoadNode)                             \
+  declare_c2_type(Load4INode, VectorLoadNode)                             \
+  declare_c2_type(Load2INode, VectorLoadNode)                             \
+  declare_c2_type(Load2LNode, VectorLoadNode)                             \
+  declare_c2_type(Load4FNode, VectorLoadNode)                             \
+  declare_c2_type(Load2FNode, VectorLoadNode)                             \
+  declare_c2_type(Load2DNode, VectorLoadNode)                             \
+  declare_c2_type(VectorStoreNode, StoreNode)                             \
+  declare_c2_type(Store16BNode, VectorStoreNode)                          \
+  declare_c2_type(Store8BNode, VectorStoreNode)                           \
+  declare_c2_type(Store4BNode, VectorStoreNode)                           \
+  declare_c2_type(Store8CNode, VectorStoreNode)                           \
+  declare_c2_type(Store4CNode, VectorStoreNode)                           \
+  declare_c2_type(Store2CNode, VectorStoreNode)                           \
+  declare_c2_type(Store4INode, VectorStoreNode)                           \
+  declare_c2_type(Store2INode, VectorStoreNode)                           \
+  declare_c2_type(Store2LNode, VectorStoreNode)                           \
+  declare_c2_type(Store4FNode, VectorStoreNode)                           \
+  declare_c2_type(Store2FNode, VectorStoreNode)                           \
+  declare_c2_type(Store2DNode, VectorStoreNode)                           \
+  declare_c2_type(Replicate16BNode, VectorNode)                           \
+  declare_c2_type(Replicate8BNode, VectorNode)                            \
+  declare_c2_type(Replicate4BNode, VectorNode)                            \
+  declare_c2_type(Replicate8CNode, VectorNode)                            \
+  declare_c2_type(Replicate4CNode, VectorNode)                            \
+  declare_c2_type(Replicate2CNode, VectorNode)                            \
+  declare_c2_type(Replicate8SNode, VectorNode)                            \
+  declare_c2_type(Replicate4SNode, VectorNode)                            \
+  declare_c2_type(Replicate2SNode, VectorNode)                            \
+  declare_c2_type(Replicate4INode, VectorNode)                            \
+  declare_c2_type(Replicate2INode, VectorNode)                            \
+  declare_c2_type(Replicate2LNode, VectorNode)                            \
+  declare_c2_type(Replicate4FNode, VectorNode)                            \
+  declare_c2_type(Replicate2FNode, VectorNode)                            \
+  declare_c2_type(Replicate2DNode, VectorNode)                            \
+  declare_c2_type(PackNode, VectorNode)                                   \
+  declare_c2_type(PackBNode, PackNode)                                    \
+  declare_c2_type(PackCNode, PackNode)                                    \
+  declare_c2_type(PackSNode, PackNode)                                    \
+  declare_c2_type(PackINode, PackNode)                                    \
+  declare_c2_type(PackLNode, PackNode)                                    \
+  declare_c2_type(PackFNode, PackNode)                                    \
+  declare_c2_type(PackDNode, PackNode)                                    \
+  declare_c2_type(Pack2x1BNode, PackNode)                                 \
+  declare_c2_type(Pack2x2BNode, PackNode)                                 \
+  declare_c2_type(ExtractNode, Node)                                      \
+  declare_c2_type(ExtractBNode, ExtractNode)                              \
+  declare_c2_type(ExtractCNode, ExtractNode)                              \
+  declare_c2_type(ExtractSNode, ExtractNode)                              \
+  declare_c2_type(ExtractINode, ExtractNode)                              \
+  declare_c2_type(ExtractLNode, ExtractNode)                              \
+  declare_c2_type(ExtractFNode, ExtractNode)                              \
+  declare_c2_type(ExtractDNode, ExtractNode)                              \
                                                                           \
   /*********************/                                                 \
   /* Adapter Blob Entries */                                              \
@@ -1384,6 +2035,32 @@
   declare_toplevel_type(AdapterHandlerEntry)                              \
   declare_toplevel_type(AdapterHandlerEntry*)                             \
                                                                           \
+  /*********************/                                                 \
+  /* CI */                                                                \
+  /*********************/                                                 \
+  declare_toplevel_type(ciEnv)                                            \
+  declare_toplevel_type(ciObjectFactory)                                  \
+  declare_toplevel_type(ciConstant)                                       \
+  declare_toplevel_type(ciField)                                          \
+  declare_toplevel_type(void*)                                            \
+  declare_toplevel_type(ciObject)                                         \
+  declare_type(ciMethod, ciObject)                                        \
+  declare_type(ciMethodData, ciObject)                                    \
+  declare_type(ciType, ciObject)                                          \
+  declare_type(ciInstance, ciObject)                                      \
+  declare_toplevel_type(ciSymbol)                                         \
+  declare_type(ciKlass, ciType)                                           \
+  declare_type(ciInstanceKlass, ciKlass)                                  \
+  declare_type(ciArrayKlass, ciKlass)                                     \
+  declare_type(ciTypeArrayKlass, ciArrayKlass)                            \
+  declare_type(ciObjArrayKlass, ciArrayKlass)                             \
+  declare_type(ciMethodKlass, ciKlass)                                    \
+  declare_type(ciKlassKlass, ciKlass)                                     \
+  declare_type(ciInstanceKlassKlass, ciKlassKlass)                        \
+  declare_type(ciArrayKlassKlass, ciKlassKlass)                           \
+  declare_type(ciTypeArrayKlassKlass, ciArrayKlassKlass)                  \
+  declare_type(ciObjArrayKlassKlass, ciArrayKlassKlass)                   \
+                                                                          \
   /********************/                                                  \
   /* -XX flags        */                                                  \
   /********************/                                                  \
@@ -1392,6 +2069,12 @@
   declare_toplevel_type(Flag*)                                            \
                                                                           \
   /********************/                                                  \
+  /* JVMTI            */                                                  \
+  /********************/                                                  \
+                                                                          \
+  declare_toplevel_type(JvmtiExport)                                      \
+                                                                          \
+  /********************/                                                  \
   /* JDK/VM version   */                                                  \
   /********************/                                                  \
                                                                           \
@@ -1417,19 +2100,24 @@
    declare_integer_type(Location::Type)                                   \
    declare_integer_type(Location::Where)                                  \
    declare_integer_type(PermGen::Name)                                    \
+   declare_integer_type(FlagValueOrigin)                                  \
+   COMPILER2_PRESENT(declare_integer_type(OptoReg::Name))                 \
                                                                           \
    declare_integer_type(AccessFlags)  /* FIXME: wrong type (not integer) */\
   declare_toplevel_type(address)      /* FIXME: should this be an integer type? */\
+   declare_integer_type(BasicType)   /* FIXME: wrong type (not integer) */\
   declare_toplevel_type(BreakpointInfo)                                   \
   declare_toplevel_type(BreakpointInfo*)                                  \
   declare_toplevel_type(CodeBlob*)                                        \
   declare_toplevel_type(CompressedWriteStream*)                           \
   declare_toplevel_type(ConstantPoolCacheEntry)                           \
   declare_toplevel_type(elapsedTimer)                                     \
+  declare_toplevel_type(frame)                                            \
   declare_toplevel_type(intptr_t*)                                        \
    declare_unsigned_integer_type(InvocationCounter) /* FIXME: wrong type (not integer) */ \
   declare_toplevel_type(JavaThread*)                                      \
   declare_toplevel_type(java_lang_Class)                                  \
+  declare_integer_type(JavaThread::AsyncRequests)                         \
   declare_toplevel_type(jbyte*)                                           \
   declare_toplevel_type(jbyte**)                                          \
   declare_toplevel_type(jint*)                                            \
@@ -1442,6 +2130,7 @@
   declare_toplevel_type(jmethodID*)                                       \
   declare_toplevel_type(Mutex*)                                           \
   declare_toplevel_type(nmethod*)                                         \
+  COMPILER2_PRESENT(declare_unsigned_integer_type(node_idx_t))            \
   declare_toplevel_type(ObjectMonitor*)                                   \
   declare_toplevel_type(oop*)                                             \
   declare_toplevel_type(OopMap**)                                         \
@@ -1452,7 +2141,10 @@
    declare_integer_type(ReferenceType)                                    \
   declare_toplevel_type(StubQueue*)                                       \
   declare_toplevel_type(Thread*)                                          \
-  declare_toplevel_type(Universe)
+  declare_toplevel_type(Universe)                                         \
+  declare_toplevel_type(vframeArray)                                      \
+  declare_toplevel_type(vframeArrayElement)
+
 
   /* NOTE that we do not use the last_entry() macro here; it is used  */
   /* in vmStructs_<os>_<cpu>.hpp's VM_TYPES_OS_CPU macro (and must be */
@@ -1645,18 +2337,23 @@
   declare_constant(constMethodOopDesc::_has_localvariable_table)          \
                                                                           \
   /*************************************/                                 \
-  /* instanceKlass FieldOffset enum    */                                 \
+  /* instanceKlass enum                */                                 \
+  /*************************************/                                 \
+                                                                          \
+  declare_constant(instanceKlass::implementors_limit)                     \
+                                                                          \
+  /*************************************/                                 \
+  /* FieldInfo FieldOffset enum        */                                 \
   /*************************************/                                 \
                                                                           \
-  declare_constant(instanceKlass::access_flags_offset)                    \
-  declare_constant(instanceKlass::name_index_offset)                      \
-  declare_constant(instanceKlass::signature_index_offset)                 \
-  declare_constant(instanceKlass::initval_index_offset)                   \
-  declare_constant(instanceKlass::low_offset)                             \
-  declare_constant(instanceKlass::high_offset)                            \
-  declare_constant(instanceKlass::generic_signature_offset)               \
-  declare_constant(instanceKlass::next_offset)                            \
-  declare_constant(instanceKlass::implementors_limit)                     \
+  declare_constant(FieldInfo::access_flags_offset)                        \
+  declare_constant(FieldInfo::name_index_offset)                          \
+  declare_constant(FieldInfo::signature_index_offset)                     \
+  declare_constant(FieldInfo::initval_index_offset)                       \
+  declare_constant(FieldInfo::low_offset)                                 \
+  declare_constant(FieldInfo::high_offset)                                \
+  declare_constant(FieldInfo::generic_signature_offset)                   \
+  declare_constant(FieldInfo::field_slots)                                \
                                                                           \
   /************************************************/                      \
   /* instanceKlass InnerClassAttributeOffset enum */                      \
@@ -1752,6 +2449,27 @@
   declare_constant(Location::on_stack)                                    \
   declare_constant(Location::in_register)                                 \
                                                                           \
+  declare_constant(Deoptimization::Reason_many)                           \
+  declare_constant(Deoptimization::Reason_none)                           \
+  declare_constant(Deoptimization::Reason_null_check)                     \
+  declare_constant(Deoptimization::Reason_null_assert)                    \
+  declare_constant(Deoptimization::Reason_range_check)                    \
+  declare_constant(Deoptimization::Reason_class_check)                    \
+  declare_constant(Deoptimization::Reason_array_check)                    \
+  declare_constant(Deoptimization::Reason_intrinsic)                      \
+  declare_constant(Deoptimization::Reason_bimorphic)                      \
+  declare_constant(Deoptimization::Reason_unloaded)                       \
+  declare_constant(Deoptimization::Reason_uninitialized)                  \
+  declare_constant(Deoptimization::Reason_unreached)                      \
+  declare_constant(Deoptimization::Reason_unhandled)                      \
+  declare_constant(Deoptimization::Reason_constraint)                     \
+  declare_constant(Deoptimization::Reason_div0_check)                     \
+  declare_constant(Deoptimization::Reason_age)                            \
+  declare_constant(Deoptimization::Reason_predicate)                      \
+  declare_constant(Deoptimization::Reason_loop_limit_check)               \
+  declare_constant(Deoptimization::Reason_LIMIT)                          \
+  declare_constant(Deoptimization::Reason_RECORDED_LIMIT)                 \
+                                                                          \
   /*********************/                                                 \
   /* Matcher (C2 only) */                                                 \
   /*********************/                                                 \
@@ -1796,6 +2514,28 @@
                                                                           \
   declare_constant(ObjectSynchronizer::_BLOCKSIZE)                        \
                                                                           \
+  /**********************/                                                \
+  /* PcDesc             */                                                \
+  /**********************/                                                \
+                                                                          \
+  declare_constant(PcDesc::PCDESC_reexecute)                              \
+  declare_constant(PcDesc::PCDESC_is_method_handle_invoke)                \
+  declare_constant(PcDesc::PCDESC_return_oop)                             \
+                                                                          \
+  /**********************/                                                \
+  /* frame              */                                                \
+  /**********************/                                                \
+                                                                          \
+  X86_ONLY(declare_constant(frame::entry_frame_call_wrapper_offset))      \
+  declare_constant(frame::pc_return_offset)                               \
+                                                                          \
+  /*************/                                                         \
+  /* vmSymbols */                                                         \
+  /*************/                                                         \
+                                                                          \
+  declare_constant(vmSymbols::FIRST_SID)                                  \
+  declare_constant(vmSymbols::SID_LIMIT)                                  \
+                                                                          \
   /********************************/                                      \
   /* Calling convention constants */                                      \
   /********************************/                                      \
@@ -2130,6 +2870,9 @@
   VM_STRUCTS_CMS(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
                  GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
                  GENERATE_STATIC_VM_STRUCT_ENTRY)
+
+  VM_STRUCTS_G1(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
+                GENERATE_STATIC_VM_STRUCT_ENTRY)
 #endif // SERIALGC
 
   VM_STRUCTS_CPU(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
@@ -2173,6 +2916,9 @@
                GENERATE_TOPLEVEL_VM_TYPE_ENTRY)
 
   VM_TYPES_PARNEW(GENERATE_VM_TYPE_ENTRY)
+
+  VM_TYPES_G1(GENERATE_VM_TYPE_ENTRY,
+              GENERATE_TOPLEVEL_VM_TYPE_ENTRY)
 #endif // SERIALGC
 
   VM_TYPES_CPU(GENERATE_VM_TYPE_ENTRY,
@@ -2272,6 +3018,9 @@
   VM_STRUCTS_CMS(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
              CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY,
              CHECK_STATIC_VM_STRUCT_ENTRY);
+
+  VM_STRUCTS_G1(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
+                CHECK_STATIC_VM_STRUCT_ENTRY);
 #endif // SERIALGC
 
   VM_STRUCTS_CPU(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
@@ -2312,6 +3061,9 @@
                CHECK_SINGLE_ARG_VM_TYPE_NO_OP);
 
   VM_TYPES_PARNEW(CHECK_VM_TYPE_ENTRY)
+
+  VM_TYPES_G1(CHECK_VM_TYPE_ENTRY,
+              CHECK_SINGLE_ARG_VM_TYPE_NO_OP);
 #endif // SERIALGC
 
   VM_TYPES_CPU(CHECK_VM_TYPE_ENTRY,
@@ -2377,6 +3129,8 @@
   debug_only(VM_STRUCTS_CMS(ENSURE_FIELD_TYPE_PRESENT, \
                             ENSURE_FIELD_TYPE_PRESENT, \
                             ENSURE_FIELD_TYPE_PRESENT));
+  debug_only(VM_STRUCTS_G1(ENSURE_FIELD_TYPE_PRESENT, \
+                           ENSURE_FIELD_TYPE_PRESENT));
 #endif // SERIALGC
   debug_only(VM_STRUCTS_CPU(ENSURE_FIELD_TYPE_PRESENT, \
                             ENSURE_FIELD_TYPE_PRESENT, \
@@ -2440,12 +3194,14 @@
   {
     VMTypeEntry* types = origtypes;
     while (types->typeName != NULL) {
-      if (!strcmp(typeName, types->typeName)) {
+      if (strcmp(typeName, types->typeName) == 0) {
+        // Found it
         return 1;
       }
       ++types;
     }
   }
+  // Search for the base type by peeling off const and *
   size_t len = strlen(typeName);
   if (typeName[len-1] == '*') {
     char * s = new char[len];
@@ -2490,7 +3246,7 @@
   if (!isRecurse) {
     tty->print_cr("type \"%s\" not found", typeName);
   }
-  return 2;
+  return 0;
 }
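In other words, once the direct table lookup misses, recursiveFindType retries with the base type: a pointer spelling such as "Klass*" (used here purely as an illustration) is retried as "Klass" after stripping the trailing '*', and a leading "const " is peeled off in the same way, so const- and pointer-qualified spellings of a declared type are still recognized.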
 
 
--- a/src/share/vm/runtime/vm_version.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/vm_version.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -46,6 +46,7 @@
 const char* Abstract_VM_Version::_s_internal_vm_info_string = Abstract_VM_Version::internal_vm_info_string();
 bool Abstract_VM_Version::_supports_cx8 = false;
 unsigned int Abstract_VM_Version::_logical_processors_per_package = 1U;
+int Abstract_VM_Version::_reserve_for_allocation_prefetch = 0;
 
 #ifndef HOTSPOT_RELEASE_VERSION
   #error HOTSPOT_RELEASE_VERSION must be defined
--- a/src/share/vm/runtime/vm_version.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/runtime/vm_version.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -44,6 +44,7 @@
   static bool         _initialized;
   static int          _parallel_worker_threads;
   static bool         _parallel_worker_threads_initialized;
+  static int          _reserve_for_allocation_prefetch;
 
   static unsigned int nof_parallel_worker_threads(unsigned int num,
                                                   unsigned int dem,
@@ -77,6 +78,12 @@
     return _logical_processors_per_package;
   }
 
+  // Need space at the end of the TLAB for prefetch instructions
+  // which may fault when accessing memory outside of the heap.
+  static int reserve_for_allocation_prefetch() {
+    return _reserve_for_allocation_prefetch;
+  }
+
   // ARCH specific policy for the BiasedLocking
   static bool use_biased_locking()  { return true; }
 
--- a/src/share/vm/services/g1MemoryPool.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/services/g1MemoryPool.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -32,71 +32,44 @@
 G1MemoryPoolSuper::G1MemoryPoolSuper(G1CollectedHeap* g1h,
                                      const char* name,
                                      size_t init_size,
+                                     size_t max_size,
                                      bool support_usage_threshold) :
-  _g1h(g1h), CollectedMemoryPool(name,
-                                   MemoryPool::Heap,
-                                   init_size,
-                                   undefined_max(),
-                                   support_usage_threshold) {
+  _g1mm(g1h->g1mm()), CollectedMemoryPool(name,
+                                          MemoryPool::Heap,
+                                          init_size,
+                                          max_size,
+                                          support_usage_threshold) {
   assert(UseG1GC, "sanity");
 }
 
-// See the comment at the top of g1MemoryPool.hpp
-size_t G1MemoryPoolSuper::eden_space_committed(G1CollectedHeap* g1h) {
-  return MAX2(eden_space_used(g1h), (size_t) HeapRegion::GrainBytes);
-}
-
-// See the comment at the top of g1MemoryPool.hpp
-size_t G1MemoryPoolSuper::eden_space_used(G1CollectedHeap* g1h) {
-  return g1h->g1mm()->eden_space_used();
-}
-
-// See the comment at the top of g1MemoryPool.hpp
-size_t G1MemoryPoolSuper::survivor_space_committed(G1CollectedHeap* g1h) {
-  return g1h->g1mm()->survivor_space_committed();
-}
-
-// See the comment at the top of g1MemoryPool.hpp
-size_t G1MemoryPoolSuper::survivor_space_used(G1CollectedHeap* g1h) {
-  return g1h->g1mm()->survivor_space_used();
-}
-
-// See the comment at the top of g1MemoryPool.hpp
-size_t G1MemoryPoolSuper::old_space_committed(G1CollectedHeap* g1h) {
-  return g1h->g1mm()->old_space_committed();
-}
-
-// See the comment at the top of g1MemoryPool.hpp
-size_t G1MemoryPoolSuper::old_space_used(G1CollectedHeap* g1h) {
-  return g1h->g1mm()->old_space_used();
-}
-
 G1EdenPool::G1EdenPool(G1CollectedHeap* g1h) :
   G1MemoryPoolSuper(g1h,
-                    "G1 Eden",
-                    eden_space_committed(g1h), /* init_size */
+                    "G1 Eden Space",
+                    g1h->g1mm()->eden_space_committed(), /* init_size */
+                    _undefined_max,
                     false /* support_usage_threshold */) { }
 
 MemoryUsage G1EdenPool::get_memory_usage() {
   size_t initial_sz = initial_size();
   size_t max_sz     = max_size();
   size_t used       = used_in_bytes();
-  size_t committed  = eden_space_committed(_g1h);
+  size_t committed  = _g1mm->eden_space_committed();
 
   return MemoryUsage(initial_sz, used, committed, max_sz);
 }
 
 G1SurvivorPool::G1SurvivorPool(G1CollectedHeap* g1h) :
   G1MemoryPoolSuper(g1h,
-                    "G1 Survivor",
-                    survivor_space_committed(g1h), /* init_size */
+                    "G1 Survivor Space",
+                    g1h->g1mm()->survivor_space_committed(), /* init_size */
+                    _undefined_max,
                     false /* support_usage_threshold */) { }
 
 MemoryUsage G1SurvivorPool::get_memory_usage() {
   size_t initial_sz = initial_size();
   size_t max_sz     = max_size();
   size_t used       = used_in_bytes();
-  size_t committed  = survivor_space_committed(_g1h);
+  size_t committed  = _g1mm->survivor_space_committed();
 
   return MemoryUsage(initial_sz, used, committed, max_sz);
 }
@@ -104,14 +77,15 @@
 G1OldGenPool::G1OldGenPool(G1CollectedHeap* g1h) :
   G1MemoryPoolSuper(g1h,
                     "G1 Old Gen",
-                    old_space_committed(g1h), /* init_size */
+                    g1h->g1mm()->old_space_committed(), /* init_size */
+                    _undefined_max,
                     true /* support_usage_threshold */) { }
 
 MemoryUsage G1OldGenPool::get_memory_usage() {
   size_t initial_sz = initial_size();
   size_t max_sz     = max_size();
   size_t used       = used_in_bytes();
-  size_t committed  = old_space_committed(_g1h);
+  size_t committed  = _g1mm->old_space_committed();
 
   return MemoryUsage(initial_sz, used, committed, max_sz);
 }
--- a/src/share/vm/services/g1MemoryPool.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/services/g1MemoryPool.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -26,12 +26,11 @@
 #define SHARE_VM_SERVICES_G1MEMORYPOOL_HPP
 
 #ifndef SERIALGC
+#include "gc_implementation/g1/g1MonitoringSupport.hpp"
 #include "services/memoryPool.hpp"
 #include "services/memoryUsage.hpp"
 #endif
 
-class G1CollectedHeap;
-
 // This file contains the three classes that represent the memory
 // pools of the G1 spaces: G1EdenPool, G1SurvivorPool, and
 // G1OldGenPool. In G1, unlike our other GCs, we do not have a
@@ -50,37 +49,19 @@
 // on this model.
 //
 
-
 // This class is shared by the three G1 memory pool classes
-// (G1EdenPool, G1SurvivorPool, G1OldGenPool). Given that the way we
-// calculate used / committed bytes for these three pools is related
-// (see comment above), we put the calculations in this class so that
-// we can easily share them among the subclasses.
+// (G1EdenPool, G1SurvivorPool, G1OldGenPool).
 class G1MemoryPoolSuper : public CollectedMemoryPool {
 protected:
-  G1CollectedHeap* _g1h;
+  const static size_t _undefined_max = (size_t) -1;
+  G1MonitoringSupport* _g1mm;
 
   // Would only be called from subclasses.
   G1MemoryPoolSuper(G1CollectedHeap* g1h,
                     const char* name,
                     size_t init_size,
+                    size_t max_size,
                     bool support_usage_threshold);
-
-  // The reason why all the code is in static methods is so that it
-  // can be safely called from the constructors of the subclasses.
-
-  static size_t undefined_max() {
-    return (size_t) -1;
-  }
-
-  static size_t eden_space_committed(G1CollectedHeap* g1h);
-  static size_t eden_space_used(G1CollectedHeap* g1h);
-
-  static size_t survivor_space_committed(G1CollectedHeap* g1h);
-  static size_t survivor_space_used(G1CollectedHeap* g1h);
-
-  static size_t old_space_committed(G1CollectedHeap* g1h);
-  static size_t old_space_used(G1CollectedHeap* g1h);
 };
 
 // Memory pool that represents the G1 eden.
@@ -89,10 +70,10 @@
   G1EdenPool(G1CollectedHeap* g1h);
 
   size_t used_in_bytes() {
-    return eden_space_used(_g1h);
+    return _g1mm->eden_space_used();
   }
   size_t max_size() const {
-    return undefined_max();
+    return _undefined_max;
   }
   MemoryUsage get_memory_usage();
 };
@@ -103,10 +84,10 @@
   G1SurvivorPool(G1CollectedHeap* g1h);
 
   size_t used_in_bytes() {
-    return survivor_space_used(_g1h);
+    return _g1mm->survivor_space_used();
   }
   size_t max_size() const {
-    return undefined_max();
+    return _undefined_max;
   }
   MemoryUsage get_memory_usage();
 };
@@ -117,10 +98,10 @@
   G1OldGenPool(G1CollectedHeap* g1h);
 
   size_t used_in_bytes() {
-    return old_space_used(_g1h);
+    return _g1mm->old_space_used();
   }
   size_t max_size() const {
-    return undefined_max();
+    return _undefined_max;
   }
   MemoryUsage get_memory_usage();
 };
--- a/src/share/vm/services/gcNotifier.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/services/gcNotifier.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -92,7 +92,6 @@
                           &args,
                           CHECK_NH);
   return Handle(THREAD,(oop)result.get_jobject());
-
 }
 
 static Handle createGcInfo(GCMemoryManager *gcManager, GCStatInfo *gcStatInfo,TRAPS) {
@@ -100,9 +99,16 @@
   // Fill the arrays of MemoryUsage objects with before and after GC
   // per pool memory usage
 
-  klassOop muKlass = Management::java_lang_management_MemoryUsage_klass(CHECK_NH);   objArrayOop bu = oopFactory::new_objArray( muKlass,MemoryService::num_memory_pools(), CHECK_NH);
+  klassOop mu_klass = Management::java_lang_management_MemoryUsage_klass(CHECK_NH);
+  instanceKlassHandle mu_kh(THREAD, mu_klass);
+
+  // The array allocations below should use a handle containing mu_klass
+  // as the first allocation could trigger a GC, causing the actual
+  // klass oop to move, and leaving mu_klass pointing to the old
+  // location.
+  objArrayOop bu = oopFactory::new_objArray(mu_kh(), MemoryService::num_memory_pools(), CHECK_NH);
   objArrayHandle usage_before_gc_ah(THREAD, bu);
-  objArrayOop au = oopFactory::new_objArray(muKlass,MemoryService::num_memory_pools(), CHECK_NH);
+  objArrayOop au = oopFactory::new_objArray(mu_kh(), MemoryService::num_memory_pools(), CHECK_NH);
   objArrayHandle usage_after_gc_ah(THREAD, au);
 
   for (int i = 0; i < MemoryService::num_memory_pools(); i++) {
@@ -126,7 +132,7 @@
   // The type is 'I'
   objArrayOop extra_args_array = oopFactory::new_objArray(SystemDictionary::Integer_klass(), 1, CHECK_NH);
   objArrayHandle extra_array (THREAD, extra_args_array);
-  klassOop itKlass= SystemDictionary::Integer_klass();
+  klassOop itKlass = SystemDictionary::Integer_klass();
   instanceKlassHandle intK(THREAD, itKlass);
 
   instanceHandle extra_arg_val = intK->allocate_instance_handle(CHECK_NH);
@@ -147,7 +153,7 @@
   extra_array->obj_at_put(0,extra_arg_val());
 
   klassOop gcInfoklass = Management::com_sun_management_GcInfo_klass(CHECK_NH);
-  instanceKlassHandle ik (THREAD,gcInfoklass);
+  instanceKlassHandle ik(THREAD, gcInfoklass);
 
   Handle gcInfo_instance = ik->allocate_instance_handle(CHECK_NH);
 
--- a/src/share/vm/services/heapDumper.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/services/heapDumper.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -27,6 +27,7 @@
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "gc_implementation/shared/vmGCOperations.hpp"
+#include "memory/gcLocker.inline.hpp"
 #include "memory/genCollectedHeap.hpp"
 #include "memory/universe.hpp"
 #include "oops/objArrayKlass.hpp"
@@ -1709,11 +1710,16 @@
 
   HandleMark hm;
   CollectedHeap* ch = Universe::heap();
+
+  ch->ensure_parsability(false); // must happen, even if collection does
+                                 // not happen (e.g. due to GC_locker)
+
   if (_gc_before_heap_dump) {
-    ch->collect_as_vm_thread(GCCause::_heap_dump);
-  } else {
-    // make the heap parsable (no need to retire TLABs)
-    ch->ensure_parsability(false);
+    if (GC_locker::is_active()) {
+      warning("GC locker is held; pre-heapdump GC was skipped");
+    } else {
+      ch->collect_as_vm_thread(GCCause::_heap_dump);
+    }
   }
 
   // At this point we should be the only dumper active, so
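The change above makes a "GC before dump" request degrade gracefully: the heap is always made parsable, and if the GC_locker is held the collection is skipped with a warning rather than attempted. A minimal sketch of exercising that path from Java (the dump file path is just an example) is to request a live-objects-only dump, which is what sets _gc_before_heap_dump:

import java.lang.management.ManagementFactory;
import com.sun.management.HotSpotDiagnosticMXBean;

public class LiveHeapDump {
    public static void main(String[] args) throws Exception {
        HotSpotDiagnosticMXBean bean = ManagementFactory.newPlatformMXBeanProxy(
                ManagementFactory.getPlatformMBeanServer(),
                "com.sun.management:type=HotSpotDiagnostic",
                HotSpotDiagnosticMXBean.class);
        // live = true asks for a collection before the dump, i.e. the
        // collect_as_vm_thread(GCCause::_heap_dump) path patched above.
        bean.dumpHeap("/tmp/app.hprof", true /* live */);
    }
}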
--- a/src/share/vm/utilities/accessFlags.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/utilities/accessFlags.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -79,9 +79,14 @@
   // Note that the class-related ACC_ANNOTATION bit conflicts with these flags.
   JVM_ACC_FIELD_ACCESS_WATCHED       = 0x00002000,  // field access is watched by JVMTI
   JVM_ACC_FIELD_MODIFICATION_WATCHED = 0x00008000,  // field modification is watched by JVMTI
+  JVM_ACC_FIELD_INTERNAL             = 0x00000400,  // internal field, same as JVM_ACC_ABSTRACT
+
+  JVM_ACC_FIELD_INTERNAL_FLAGS       = JVM_ACC_FIELD_ACCESS_WATCHED |
+                                       JVM_ACC_FIELD_MODIFICATION_WATCHED |
+                                       JVM_ACC_FIELD_INTERNAL,
 
                                                     // flags accepted by set_field_flags()
-  JVM_ACC_FIELD_FLAGS                = 0x00008000 | JVM_ACC_WRITTEN_FLAGS
+  JVM_ACC_FIELD_FLAGS                = JVM_RECOGNIZED_FIELD_MODIFIERS | JVM_ACC_FIELD_INTERNAL_FLAGS
 
 };
 
@@ -150,13 +155,17 @@
   bool is_field_access_watched() const  { return (_flags & JVM_ACC_FIELD_ACCESS_WATCHED) != 0; }
   bool is_field_modification_watched() const
                                         { return (_flags & JVM_ACC_FIELD_MODIFICATION_WATCHED) != 0; }
+  bool is_internal() const              { return (_flags & JVM_ACC_FIELD_INTERNAL) != 0; }
 
   // get .class file flags
   jint get_flags               () const { return (_flags & JVM_ACC_WRITTEN_FLAGS); }
 
   // Initialization
   void add_promoted_flags(jint flags)   { _flags |= (flags & JVM_ACC_PROMOTED_FLAGS); }
-  void set_field_flags(jint flags)      { _flags = (flags & JVM_ACC_FIELD_FLAGS); }
+  void set_field_flags(jint flags)      {
+    assert((flags & JVM_ACC_FIELD_FLAGS) == flags, "only recognized flags");
+    _flags = (flags & JVM_ACC_FIELD_FLAGS);
+  }
   void set_flags(jint flags)            { _flags = (flags & JVM_ACC_WRITTEN_FLAGS); }
 
   void set_queued_for_compilation()    { atomic_set_bits(JVM_ACC_QUEUED); }
@@ -218,8 +227,8 @@
                                        }
 
   // Conversion
-  jshort as_short()                    { return (jshort)_flags; }
-  jint   as_int()                      { return _flags; }
+  jshort as_short() const              { return (jshort)_flags; }
+  jint   as_int() const                { return _flags; }
 
   inline friend AccessFlags accessFlags_from(jint flags);
 
--- a/src/share/vm/utilities/bitMap.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/utilities/bitMap.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -161,11 +161,11 @@
 
   // Set or clear the specified bit.
   inline void set_bit(idx_t bit);
-  void clear_bit(idx_t bit);
+  inline void clear_bit(idx_t bit);
 
   // Atomically set or clear the specified bit.
-  bool par_set_bit(idx_t bit);
-  bool par_clear_bit(idx_t bit);
+  inline bool par_set_bit(idx_t bit);
+  inline bool par_clear_bit(idx_t bit);
 
   // Put the given value at the given offset. The parallel version
   // will CAS the value into the bitmap and is quite a bit slower.
--- a/src/share/vm/utilities/exceptions.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/utilities/exceptions.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -58,6 +58,8 @@
 // include hierachy reasons).
 
 class ThreadShadow: public CHeapObj {
+  friend class VMStructs;
+
  protected:
   oop  _pending_exception;                       // Thread has gc actions.
   const char* _exception_file;                   // file information for exception (debugging only)
--- a/src/share/vm/utilities/growableArray.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/utilities/growableArray.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -77,6 +77,8 @@
 }
 
 class GenericGrowableArray : public ResourceObj {
+  friend class VMStructs;
+
  protected:
   int    _len;          // current length
   int    _max;          // maximum length
@@ -136,6 +138,8 @@
 };
 
 template<class E> class GrowableArray : public GenericGrowableArray {
+  friend class VMStructs;
+
  private:
   E*     _data;         // data array
 
--- a/src/share/vm/utilities/ostream.cpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/utilities/ostream.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -349,7 +349,7 @@
 fileStream::~fileStream() {
   if (_file != NULL) {
     if (_need_close) fclose(_file);
-    _file = NULL;
+    _file      = NULL;
   }
 }
 
@@ -377,6 +377,86 @@
   update_position(s, len);
 }
 
+rotatingFileStream::~rotatingFileStream() {
+  if (_file != NULL) {
+    if (_need_close) fclose(_file);
+    _file      = NULL;
+    FREE_C_HEAP_ARRAY(char, _file_name);
+    _file_name = NULL;
+  }
+}
+
+rotatingFileStream::rotatingFileStream(const char* file_name) {
+  _cur_file_num = 0;
+  _bytes_writen = 0L;
+  _file_name = NEW_C_HEAP_ARRAY(char, strlen(file_name)+10);
+  jio_snprintf(_file_name, strlen(file_name)+10, "%s.%d", file_name, _cur_file_num);
+  _file = fopen(_file_name, "w");
+  _need_close = true;
+}
+
+rotatingFileStream::rotatingFileStream(const char* file_name, const char* opentype) {
+  _cur_file_num = 0;
+  _bytes_writen = 0L;
+  _file_name = NEW_C_HEAP_ARRAY(char, strlen(file_name)+10);
+  jio_snprintf(_file_name, strlen(file_name)+10, "%s.%d", file_name, _cur_file_num);
+  _file = fopen(_file_name, opentype);
+  _need_close = true;
+}
+
+void rotatingFileStream::write(const char* s, size_t len) {
+  if (_file != NULL)  {
+    // Track how many bytes have actually been written so the log can be
+    // rotated once it reaches GCLogFileSize.
+    size_t count = fwrite(s, 1, len, _file);
+    Atomic::add((jlong)count, &_bytes_writen);
+  }
+  update_position(s, len);
+}
+
+// rotate_log must be called from the VMThread at a safepoint. If the parameters
+// for gc log rotation need to be changed from a thread other than the VMThread,
+// a subtype of VM_Operation should be created and submitted to the VMThread's
+// operation queue. DO NOT call this function directly. Currently it is safe to
+// rotate the log at a safepoint through the VMThread because no mutator threads
+// or concurrent GC threads run in parallel with the VMThread to write to the gc
+// log file at a safepoint. If mutator threads or concurrent GC threads are ever
+// changed to run in parallel with the VMThread at a safepoint, write and
+// rotate_log must be synchronized.
+void rotatingFileStream::rotate_log() {
+  if (_bytes_writen < (jlong)GCLogFileSize) return;
+#ifdef ASSERT
+  Thread *thread = Thread::current();
+  assert(thread == NULL ||
+         (thread->is_VM_thread() && SafepointSynchronize::is_at_safepoint()),
+         "Must be VMThread at safepoint");
+#endif
+  if (NumberOfGCLogFiles == 1) {
+    // rotate in same file
+    rewind();
+    _bytes_writen = 0L;
+    return;
+  }
+
+  // rotate among files named file.0, file.1, ..., file.<NumberOfGCLogFiles-1>:
+  // close the current file and move on to the next one
+  if (_file != NULL) {
+    _cur_file_num ++;
+    if (_cur_file_num >= NumberOfGCLogFiles) _cur_file_num = 0;
+    jio_snprintf(_file_name, strlen(Arguments::gc_log_filename()) + 10, "%s.%d",
+             Arguments::gc_log_filename(), _cur_file_num);
+    fclose(_file);
+    _file = NULL;
+  }
+  _file = fopen(_file_name, "w");
+  if (_file != NULL) {
+    _bytes_writen = 0L;
+    _need_close = true;
+  } else {
+    tty->print_cr("failed to open rotation log file %s due to %s",
+                  _file_name, strerror(errno));
+    _need_close = false;
+  }
+}
+
 defaultStream* defaultStream::instance = NULL;
 int defaultStream::_output_fd = 1;
 int defaultStream::_error_fd  = 2;
@@ -749,14 +829,17 @@
 
   gclog_or_tty = tty; // default to tty
   if (Arguments::gc_log_filename() != NULL) {
-    fileStream * gclog = new(ResourceObj::C_HEAP)
-                           fileStream(Arguments::gc_log_filename());
+    fileStream * gclog  = UseGCLogFileRotation ?
+                          new(ResourceObj::C_HEAP)
+                             rotatingFileStream(Arguments::gc_log_filename()) :
+                          new(ResourceObj::C_HEAP)
+                             fileStream(Arguments::gc_log_filename());
     if (gclog->is_open()) {
       // now we update the time stamp of the GC log to be synced up
       // with tty.
       gclog->time_stamp().update_to(tty->time_stamp().ticks());
-      gclog_or_tty = gclog;
     }
+    gclog_or_tty = gclog;
   }
 
   // If we haven't lazily initialized the logfile yet, do it now,
--- a/src/share/vm/utilities/ostream.hpp	Wed Sep 28 23:13:07 2011 +0100
+++ b/src/share/vm/utilities/ostream.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -110,14 +110,15 @@
    // flushing
    virtual void flush() {}
    virtual void write(const char* str, size_t len) = 0;
-   virtual ~outputStream() {}  // close properly on deletion
+   virtual void rotate_log() {} // GC log rotation
+   virtual ~outputStream() {}   // close properly on deletion
 
    void dec_cr() { dec(); cr(); }
    void inc_cr() { inc(); cr(); }
 };
 
 // standard output
-                                // ANSI C++ name collision
+// ANSI C++ name collision
 extern outputStream* tty;           // tty output
 extern outputStream* gclog_or_tty;  // stream for gc log if -Xloggc:<f>, or tty
 
@@ -176,6 +177,7 @@
   FILE* _file;
   bool  _need_close;
  public:
+  fileStream() { _file = NULL; _need_close = false; }
   fileStream(const char* file_name);
   fileStream(const char* file_name, const char* opentype);
   fileStream(FILE* file) { _file = file; _need_close = false; }
@@ -210,6 +212,20 @@
   void flush() {};
 };
 
+class rotatingFileStream : public fileStream {
+ protected:
+  char*  _file_name;
+  jlong  _bytes_writen;
+  uintx  _cur_file_num;             // current logfile rotation number, from 0 to MaxGCLogFileNumbers-1
+ public:
+  rotatingFileStream(const char* file_name);
+  rotatingFileStream(const char* file_name, const char* opentype);
+  rotatingFileStream(FILE* file) : fileStream(file) {}
+  ~rotatingFileStream();
+  virtual void write(const char* c, size_t len);
+  virtual void rotate_log();
+};
+
 void ostream_init();
 void ostream_init_log();
 void ostream_exit();
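Taken together, the rotatingFileStream plumbing above is what backs GC log file rotation. As a usage sketch (the log file name and sizes are arbitrary example values), rotation would be enabled with something like

  java -Xloggc:gc.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=8M ...

which writes gc.log.0 through gc.log.4 and wraps back to gc.log.0 once every file has reached GCLogFileSize; with NumberOfGCLogFiles=1 the single gc.log.0 is simply rewound and overwritten in place.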
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/quickSort.cpp	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "utilities/quickSort.hpp"
+
+#ifndef PRODUCT
+
+// Unit tests
+
+#include "runtime/os.hpp"
+#include <stdlib.h>
+
+static int test_comparator(int a, int b) {
+  if (a == b) {
+    return 0;
+  }
+  if (a < b) {
+    return -1;
+  }
+  return 1;
+}
+
+static int test_even_odd_comparator(int a, int b) {
+  bool a_is_odd = (a % 2) == 1;
+  bool b_is_odd = (b % 2) == 1;
+  if (a_is_odd == b_is_odd) {
+    return 0;
+  }
+  if (a_is_odd) {
+    return -1;
+  }
+  return 1;
+}
+
+static int test_stdlib_comparator(const void* a, const void* b) {
+  int ai = *(int*)a;
+  int bi = *(int*)b;
+  if (ai == bi) {
+    return 0;
+  }
+  if (ai < bi) {
+    return -1;
+  }
+  return 1;
+}
+
+void QuickSort::print_array(const char* prefix, int* array, int length) {
+  tty->print("%s:", prefix);
+  for (int i = 0; i < length; i++) {
+    tty->print(" %d", array[i]);
+  }
+  tty->print_cr("");
+}
+
+bool QuickSort::compare_arrays(int* actual, int* expected, int length) {
+  for (int i = 0; i < length; i++) {
+    if (actual[i] != expected[i]) {
+      print_array("Sorted array  ", actual, length);
+      print_array("Expected array", expected, length);
+      return false;
+    }
+  }
+  return true;
+}
+
+template <class C>
+bool QuickSort::sort_and_compare(int* arrayToSort, int* expectedResult, int length, C comparator, bool idempotent) {
+  sort<int, C>(arrayToSort, length, comparator, idempotent);
+  return compare_arrays(arrayToSort, expectedResult, length);
+}
+
+bool QuickSort::test_quick_sort() {
+#if 0
+  tty->print_cr("test_quick_sort\n");
+  {
+    int* test_array = NULL;
+    int* expected_array = NULL;
+    assert(sort_and_compare(test_array, expected_array, 0, test_comparator), "Empty array not handled");
+  }
+  {
+    int test_array[] = {3};
+    int expected_array[] = {3};
+    assert(sort_and_compare(test_array, expected_array, 1, test_comparator), "Single value array not handled");
+  }
+  {
+    int test_array[] = {3,2};
+    int expected_array[] = {2,3};
+    assert(sort_and_compare(test_array, expected_array, 2, test_comparator), "Array with 2 values not correctly sorted");
+  }
+  {
+    int test_array[] = {3,2,1};
+    int expected_array[] = {1,2,3};
+    assert(sort_and_compare(test_array, expected_array, 3, test_comparator), "Array with 3 values not correctly sorted");
+  }
+  {
+    int test_array[] = {4,3,2,1};
+    int expected_array[] = {1,2,3,4};
+    assert(sort_and_compare(test_array, expected_array, 4, test_comparator), "Array with 4 values not correctly sorted");
+  }
+  {
+    int test_array[] = {7,1,5,3,6,9,8,2,4,0};
+    int expected_array[] = {0,1,2,3,4,5,6,7,8,9};
+    assert(sort_and_compare(test_array, expected_array, 10, test_comparator), "Array with 10 values not correctly sorted");
+  }
+  {
+    int test_array[] = {4,4,1,4};
+    int expected_array[] = {1,4,4,4};
+    assert(sort_and_compare(test_array, expected_array, 4, test_comparator), "3 duplicates not sorted correctly");
+  }
+  {
+    int test_array[] = {0,1,2,3,4,5,6,7,8,9};
+    int expected_array[] = {0,1,2,3,4,5,6,7,8,9};
+    assert(sort_and_compare(test_array, expected_array, 10, test_comparator), "Already sorted array not correctly sorted");
+  }
+  {
+    // One of the random arrays that found an issue in the partition method.
+    int test_array[] = {76,46,81,8,64,56,75,11,51,55,11,71,59,27,9,64,69,75,21,25,39,40,44,32,7,8,40,41,24,78,24,74,9,65,28,6,40,31,22,13,27,82};
+    int expected_array[] = {6,7,8,8,9,9,11,11,13,21,22,24,24,25,27,27,28,31,32,39,40,40,40,41,44,46,51,55,56,59,64,64,65,69,71,74,75,75,76,78,81,82};
+    assert(sort_and_compare(test_array, expected_array, 42, test_comparator), "Not correctly sorted");
+  }
+  {
+    int test_array[] = {2,8,1,4};
+    int expected_array[] = {1,4,2,8};
+    assert(sort_and_compare(test_array, expected_array, 4, test_even_odd_comparator), "Even/odd not sorted correctly");
+  }
+  {  // Some idempotent tests
+    {
+      // An array of length 3 is only sorted by find_pivot. Make sure that it is idempotent.
+      int test_array[] = {1,4,8};
+      int expected_array[] = {1,4,8};
+      assert(sort_and_compare(test_array, expected_array, 3, test_even_odd_comparator, true), "Even/odd not idempotent");
+    }
+    {
+      int test_array[] = {1,7,9,4,8,2};
+      int expected_array[] = {1,7,9,4,8,2};
+      assert(sort_and_compare(test_array, expected_array, 6, test_even_odd_comparator, true), "Even/odd not idempotent");
+    }
+    {
+      int test_array[] = {1,9,7,4,2,8};
+      int expected_array[] = {1,9,7,4,2,8};
+      assert(sort_and_compare(test_array, expected_array, 6, test_even_odd_comparator, true), "Even/odd not idempotent");
+    }
+    {
+      int test_array[] = {7,9,1,2,8,4};
+      int expected_array[] = {7,9,1,2,8,4};
+      assert(sort_and_compare(test_array, expected_array, 6, test_even_odd_comparator, true), "Even/odd not idempotent");
+    }
+    {
+      int test_array[] = {7,1,9,2,4,8};
+      int expected_array[] = {7,1,9,2,4,8};
+      assert(sort_and_compare(test_array, expected_array, 6, test_even_odd_comparator, true), "Even/odd not idempotent");
+    }
+    {
+      int test_array[] = {9,1,7,4,8,2};
+      int expected_array[] = {9,1,7,4,8,2};
+      assert(sort_and_compare(test_array, expected_array, 6, test_even_odd_comparator, true), "Even/odd not idempotent");
+    }
+    {
+      int test_array[] = {9,7,1,4,2,8};
+      int expected_array[] = {9,7,1,4,2,8};
+      assert(sort_and_compare(test_array, expected_array, 6, test_even_odd_comparator, true), "Even/odd not idempotent");
+    }
+  }
+
+  // test sorting random arrays
+  for (int i = 0; i < 1000; i++) {
+    int length = os::random() % 100;
+    int* test_array = new int[length];
+    int* expected_array = new int[length];
+    for (int j = 0; j < length; j++) {
+        // Choose random values, but get a chance of getting duplicates
+        test_array[j] = os::random() % (length * 2);
+        expected_array[j] = test_array[j];
+    }
+
+    // Compare sorting to stdlib::qsort()
+    qsort(expected_array, length, sizeof(int), test_stdlib_comparator);
+    assert(sort_and_compare(test_array, expected_array, length, test_comparator), "Random array not correctly sorted");
+
+    // Make sure sorting is idempotent.
+    // Both test_array and expected_array are sorted by the test_comparator.
+    // Now sort them once with the test_even_odd_comparator. Then sort the
+    // test_array one more time with test_even_odd_comparator and verify that
+    // it is idempotent.
+    sort(expected_array, length, test_even_odd_comparator, true);
+    sort(test_array, length, test_even_odd_comparator, true);
+    assert(compare_arrays(test_array, expected_array, length), "Sorting identical arrays rendered different results");
+    sort(test_array, length, test_even_odd_comparator, true);
+    assert(compare_arrays(test_array, expected_array, length), "Sorting already sorted array changed order of elements - not idempotent");
+
+    delete[] test_array;
+    delete[] expected_array;
+  }
+#endif
+  return true;
+}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/quickSort.hpp	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_UTILITIES_QUICKSORT_HPP
+#define SHARE_VM_UTILITIES_QUICKSORT_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/globals.hpp"
+#include "utilities/debug.hpp"
+
+class QuickSort : AllStatic {
+
+ private:
+  template<class T>
+  static void swap(T* array, int x, int y) {
+    T tmp = array[x];
+    array[x] = array[y];
+    array[y] = tmp;
+  }
+
+  // As the pivot we use the median of the first, last and middle elements.
+  // We swap these three values into their proper places in the array, so
+  // this method not only returns the index of the pivot element, it also
+  // rearranges the array so that:
+  //     array[first] <= array[middle] <= array[last]
+  // A side effect of this is that arrays of length <= 3 end up sorted.
+  template<class T, class C>
+  static int find_pivot(T* array, int length, C comparator) {
+    assert(length > 1, "length of array must be > 1");
+
+    int middle_index = length / 2;
+    int last_index = length - 1;
+
+    if (comparator(array[0], array[middle_index]) == 1) {
+      swap(array, 0, middle_index);
+    }
+    if (comparator(array[0], array[last_index]) == 1) {
+      swap(array, 0, last_index);
+    }
+    if (comparator(array[middle_index], array[last_index]) == 1) {
+      swap(array, middle_index, last_index);
+    }
+    // Now the value in the middle of the array is the median
+    // of the first, last and middle values. Use this as the pivot.
+    return middle_index;
+  }
+
+  template<class T, class C, bool idempotent>
+  static int partition(T* array, int pivot, int length, C comparator) {
+    int left_index = -1;
+    int right_index = length;
+    T pivot_val = array[pivot];
+
+    while (true) {
+      do {
+        left_index++;
+      } while (comparator(array[left_index], pivot_val) == -1);
+      do {
+        right_index--;
+      } while (comparator(array[right_index], pivot_val) == 1);
+
+      if (left_index < right_index) {
+        if (!idempotent || comparator(array[left_index], array[right_index]) != 0) {
+          swap(array, left_index, right_index);
+        }
+      } else {
+        return right_index;
+      }
+    }
+
+    ShouldNotReachHere();
+    return 0;
+  }
+
+  template<class T, class C, bool idempotent>
+  static void inner_sort(T* array, int length, C comparator) {
+    if (length < 2) {
+      return;
+    }
+    int pivot = find_pivot(array, length, comparator);
+    if (length < 4) {
+      // arrays up to length 3 will be sorted after finding the pivot
+      return;
+    }
+    int split = partition<T, C, idempotent>(array, pivot, length, comparator);
+    int first_part_length = split + 1;
+    inner_sort<T, C, idempotent>(array, first_part_length, comparator);
+    inner_sort<T, C, idempotent>(&array[first_part_length], length - first_part_length, comparator);
+  }
+
+ public:
+  // The idempotent parameter prevents the sort from
+  // reordering an already valid ordering by never swapping
+  // elements that compare as equal. This requires extra
+  // calls to the comparator, so the performance
+  // impact depends on the comparator.
+  template<class T, class C>
+  static void sort(T* array, int length, C comparator, bool idempotent) {
+    // Switch "idempotent" from function parameter to template parameter
+    if (idempotent) {
+      inner_sort<T, C, true>(array, length, comparator);
+    } else {
+      inner_sort<T, C, false>(array, length, comparator);
+    }
+  }
+
+  // for unit testing
+#ifndef PRODUCT
+  static void print_array(const char* prefix, int* array, int length);
+  static bool compare_arrays(int* actual, int* expected, int length);
+  template <class C> static bool sort_and_compare(int* arrayToSort, int* expectedResult, int length, C comparator, bool idempotent = false);
+  static bool test_quick_sort();
+#endif
+};
+
+
+#endif //SHARE_VM_UTILITIES_QUICKSORT_HPP
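As a concrete illustration of find_pivot: for the array {7, 2, 5} the middle index is 1 and the last index is 2. comparator(7, 2) returns 1, so the first and middle elements are swapped, giving {2, 7, 5}; comparator(2, 5) returns -1, so nothing changes; comparator(7, 5) returns 1, so the middle and last elements are swapped, giving {2, 5, 7}. The method returns index 1, whose value 5 is the median of the three, and as a side effect the three-element array comes out fully sorted. The idempotent mode only affects partition: elements that compare as equal are never swapped, which is what the even/odd unit tests in quickSort.cpp rely on to show that re-sorting an already valid ordering leaves it untouched.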
--- a/test/compiler/5091921/Test7005594.sh	Wed Sep 28 23:13:07 2011 +0100
+++ b/test/compiler/5091921/Test7005594.sh	Thu Dec 22 15:46:11 2011 +0000
@@ -43,6 +43,45 @@
 echo "TESTCLASSES=${TESTCLASSES}"
 echo "CLASSPATH=${CLASSPATH}"
 
+# Amount of physical memory in megabytes
+MEM=0
+if [ -f "/proc/meminfo" ]; then
+  # Linux, Windows/Cygwin
+  MEM=`cat /proc/meminfo |grep ^MemTotal: | awk '{print $2}'`
+  MEM="$(($MEM / 1024))"
+elif [ -x "/usr/sbin/prtconf" ]; then
+  # Solaris
+  MEM=`/usr/sbin/prtconf | grep "^Memory size" | awk '{print $3}'`
+elif [ -x "/usr/sbin/system_profiler" ]; then
+  # MacOS
+  MEMo=`/usr/sbin/system_profiler SPHardwareDataType | grep Memory:`
+  MEM=`echo "$MEMo" | awk '{print $2}'`
+  MEMu=`echo "$MEMo" | awk '{print $3}'`
+  case $MEMu in
+  GB)
+    MEM="$(($MEM * 1024))"
+    ;;
+  MB)
+    ;;
+  *)
+    echo "Unknown memory unit in system_profile output: $MEMu"
+    ;;
+  esac
+elif [ -n "$ROOTDIR" -a -x "$ROOTDIR/mksnt/sysinf" ]; then
+  # Windows/MKS
+  MEM=`"$ROOTDIR/mksnt/sysinf" memory -v | grep "Total Physical Memory: " | sed 's/Total Physical Memory: *//g'`
+  MEM="$(($machine_memory / 1024))"
+else
+  echo "Unable to determine amount of physical memory on the machine"
+fi
+
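+# Worked example (illustrative numbers): a Linux box where /proc/meminfo reports
+# "MemTotal: 4051652 kB" yields MEM = 4051652 / 1024 = 3956 (MB), which passes
+# the 2000 MB check below; a machine with about 1 GB of RAM would be skipped.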
+if [ $MEM -lt 2000 ]; then
+  echo "Test skipped due to low (or unknown) memory on the system: $MEM Mb"
+  exit 0
+fi
+
+echo "MEMORY=$MEM Mb"
+
 set -x
 
 cp ${TESTSRC}/Test7005594.java .
@@ -50,7 +89,7 @@
 
 ${TESTJAVA}/bin/javac -d . Test7005594.java
 
-${TESTJAVA}/bin/java ${TESTVMOPTS} -Xms1600m -Xcomp -XX:CompileOnly=Test7005594.test Test7005594 > test.out 2>&1
+${TESTJAVA}/bin/java ${TESTVMOPTS} -Xms1600m -XX:+IgnoreUnrecognizedVMOptions -XX:-ZapUnusedHeapArea -Xcomp -XX:CompileOnly=Test7005594.test Test7005594 > test.out 2>&1
 
 result=$?
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6478991/NullCheckTest.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6478991
+ * @summary C1 NullCheckEliminator yields incorrect exceptions
+ *
+ * @run main/othervm -XX:CompileOnly=NullCheckTest.test,NullCheckTest.inlined  -Xcomp NullCheckTest
+ */
+
+public class NullCheckTest {
+        static class A {
+                int f;
+
+                public final void inlined(A a) {
+                        // This cast is intended to fail.
+                        B b = ((B) a);
+                }
+        }
+
+        static class B extends A {
+        }
+
+
+        private static void test(A a1, A a2) {
+                // Inlined call must do a null check on a1.
+                // However, the explicit NullCheck instruction is eliminated and
+                // the null check is folded into the field load below, so the
+                // exception in the inlined method is thrown before the null check
+                // and the NullPointerException is not thrown.
+                a1.inlined(a2);
+
+                int x = a1.f;
+        }
+
+        public static void main(String[] args) {
+                // load classes
+                new B();
+                try {
+                        test(null, new A());
+
+                        throw new InternalError("FAILURE: no exception");
+                } catch (NullPointerException ex) {
+                        System.out.println("CORRECT: NullPointerException");
+                } catch (ClassCastException ex) {
+                        System.out.println("FAILURE: ClassCastException");
+                        throw ex;
+                }
+        }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6990212/Test6990212.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6990212
+ * @summary JSR 292 JVMTI MethodEnter hook is not called for JSR 292 bootstrap and target methods
+ *
+ * @run main Test6990212
+ */
+
+import java.lang.invoke.*;
+
+interface intf {
+    public Object target();
+}
+
+public class Test6990212 implements intf {
+    public Object target() {
+        return null;
+    }
+
+    public static void main(String[] args) throws Throwable {
+        // Build an interface invoke and then invoke it on something
+        // that doesn't implement the interface to test the
+        // raiseException path.
+        MethodHandle target = MethodHandles.lookup().findVirtual(intf.class, "target",  MethodType.methodType(Object.class));
+        try {
+            target.invoke(new Object());
+        } catch (ClassCastException cce) {
+            // everything is ok
+            System.out.println("got expected ClassCastException");
+        }
+    }
+}
--- a/test/compiler/7052494/Test7052494.java	Wed Sep 28 23:13:07 2011 +0100
+++ b/test/compiler/7052494/Test7052494.java	Thu Dec 22 15:46:11 2011 +0000
@@ -70,18 +70,24 @@
   }
 
   // Empty loop rolls through MAXINT if i > 0
+
+  static final int limit5 = Integer.MIN_VALUE + 10000;
+
   static int test5(int i) {
     int result = 0;
-    while (i++ != 0) {
+    while (i++ != limit5) {
       result = i*2;
     }
     return result;
   }
 
   // Empty loop rolls through MININT if i < 0
+
+  static final int limit6 = Integer.MAX_VALUE - 10000;
+
   static int test6(int i) {
     int result = 0;
-    while (i-- != 0) {
+    while (i-- != limit6) {
       result = i*2;
     }
     return result;
@@ -92,6 +98,7 @@
     int[] arr = new int[8];
     int[] ar3 = { 0, 0, 4, 6, 8, 10, 0, 0 };
     int[] ar4 = { 0, 0, 0, -10, -8, -6, -4, 0 };
+    System.out.println("test1");
     for (int i = 0; i < 11000; i++) {
       int k = test1(1, 10);
       if (k != 10) {
@@ -100,6 +107,7 @@
         break;
       }
     }
+    System.out.println("test2");
     for (int i = 0; i < 11000; i++) {
       int k = test2(-1, -10);
       if (k != -10) {
@@ -108,6 +116,7 @@
         break;
       }
     }
+    System.out.println("test3");
     for (int i = 0; i < 11000; i++) {
       java.util.Arrays.fill(arr, 0);
       test3(1, 10, arr);
@@ -124,6 +133,7 @@
         break;
       }
     }
+    System.out.println("test4");
     for (int i = 0; i < 11000; i++) {
       java.util.Arrays.fill(arr, 0);
       test4(-1, -10, arr);
@@ -140,22 +150,25 @@
         break;
       }
     }
+    System.out.println("test5");
     for (int i = 0; i < 11000; i++) {
-      int k = test5(1);
-      if (k != 0) {
-        System.out.println("FAILED: " + k + " != 0");
+      int k = test5(limit6);
+      if (k != limit5*2) {
+        System.out.println("FAILED: " + k + " != " + limit5*2);
         failed = true;
         break;
       }
     }
+    System.out.println("test6");
     for (int i = 0; i < 11000; i++) {
-      int k = test6(-1);
-      if (k != 0) {
-        System.out.println("FAILED: " + k + " != 0");
+      int k = test6(limit5);
+      if (k != limit6*2) {
+        System.out.println("FAILED: " + k + " != " + limit6*2);
         failed = true;
         break;
       }
     }
+    System.out.println("finish");
     if (failed)
       System.exit(97);
   }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/7068051/Test7068051.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 7068051
+ * @summary SIGSEGV in PhaseIdealLoop::build_loop_late_post on T5440
+ *
+ * @run shell/timeout=300 Test7068051.sh
+ */
+
+import java.io.*;
+import java.nio.*;
+import java.util.*;
+import java.util.zip.*;
+
+public class Test7068051 {
+
+    public static void main (String[] args) throws Throwable {
+
+        ZipFile zf = new ZipFile(args[0]);
+
+        Enumeration<? extends ZipEntry> entries = zf.entries();
+        ArrayList<String> names = new ArrayList<String>();
+        while (entries.hasMoreElements()) {
+            names.add(entries.nextElement().getName());
+        }
+
+        byte[] bytes = new byte[16];
+        for (String name : names) {
+            ZipEntry e = zf.getEntry(name);
+
+            if (e.isDirectory())
+                continue;
+
+            final InputStream is = zf.getInputStream(e);
+
+            try  {
+                while (is.read(bytes) >= 0) {
+                }
+                is.close();
+
+            } catch (IOException x) {
+                 System.out.println("..................................");
+                 System.out.println("          -->  is :" + is);
+                 System.out.println("          is.hash :" + is.hashCode());
+                 System.out.println();
+                 System.out.println("           e.name :" + e.getName());
+                 System.out.println("           e.hash :" + e.hashCode());
+                 System.out.println("         e.method :" + e.getMethod());
+                 System.out.println("           e.size :" + e.getSize());
+                 System.out.println("          e.csize :" + e.getCompressedSize());
+
+                 x.printStackTrace();
+                 System.out.println("..................................");
+                 System.exit(97);
+            }
+        }
+        zf.close();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/7068051/Test7068051.sh	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,49 @@
+#!/bin/sh
+# 
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+# 
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+# 
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+# 
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+# 
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+# 
+# 
+
+if [ "${TESTSRC}" = "" ]
+then
+  echo "TESTSRC not set.  Test cannot execute.  Failed."
+  exit 1
+fi
+echo "TESTSRC=${TESTSRC}"
+if [ "${TESTJAVA}" = "" ]
+then
+  echo "TESTJAVA not set.  Test cannot execute.  Failed."
+  exit 1
+fi
+echo "TESTJAVA=${TESTJAVA}"
+
+set -x
+
+${TESTJAVA}/bin/jar xf ${TESTJAVA}/jre/lib/javaws.jar
+${TESTJAVA}/bin/jar cf foo.jar *
+cp ${TESTSRC}/Test7068051.java ./
+${TESTJAVA}/bin/jar -uf0 foo.jar Test7068051.java
+
+${TESTJAVA}/bin/javac -d . Test7068051.java
+
+${TESTJAVA}/bin/java -showversion -Xbatch ${TESTVMOPTS} Test7068051 foo.jar
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/7082949/Test7082949.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 7082949
+ * @summary JSR 292: missing ResourceMark in methodOopDesc::make_invoke_method
+ *
+ * @run main Test7082949
+ */
+
+import java.lang.invoke.*;
+import static java.lang.invoke.MethodHandles.*;
+import static java.lang.invoke.MethodType.*;
+
+public class Test7082949 implements Runnable {
+    public static void main(String... args) throws Throwable {
+        new Thread(new Test7082949()).start();
+    }
+
+    public static Test7082949 test() {
+        return null;
+    }
+
+    public void run() {
+        try {
+            MethodHandle m1 = MethodHandles.lookup().findStatic(Test7082949.class, "test",  methodType(Test7082949.class));
+            Test7082949 v = (Test7082949)m1.invokeExact();
+        } catch (Throwable t) {
+            t.printStackTrace();
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/7088020/Test7088020.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 7088020
+ * @summary SEGV in JNIHandleBlock::release_block
+ *
+ * @run main Test7088020
+ */
+
+import java.lang.invoke.*;
+
+
+public class Test7088020 {
+    public static boolean test() {
+        return false;
+    }
+
+    public static void main(String... args) throws Throwable {
+        MethodHandle test = MethodHandles.lookup().findStatic(Test7088020.class, "test",  MethodType.methodType(Boolean.TYPE));
+
+        // Exercise WMT with different argument alignments
+        int thrown = 0;
+        try {
+            test.invokeExact(0);
+        } catch (WrongMethodTypeException wmt) {
+            thrown++;
+            if (wmt.getStackTrace().length < 1) throw new InternalError("missing stack frames");
+        }
+        try {
+            test.invokeExact(0, 1);
+        } catch (WrongMethodTypeException wmt) {
+            thrown++;
+            if (wmt.getStackTrace().length < 1) throw new InternalError("missing stack frames");
+        }
+        try {
+            test.invokeExact(0, 1, 2);
+        } catch (WrongMethodTypeException wmt) {
+            thrown++;
+            if (wmt.getStackTrace().length < 1) throw new InternalError("missing stack frames");
+        }
+        try {
+            test.invokeExact(0, 1, 2, 3);
+        } catch (WrongMethodTypeException wmt) {
+            thrown++;
+            if (wmt.getStackTrace().length < 1) throw new InternalError("missing stack frames");
+        }
+        try {
+            test.invokeExact(0, 1, 2, 3, 4);
+        } catch (WrongMethodTypeException wmt) {
+            thrown++;
+            if (wmt.getStackTrace().length < 1) throw new InternalError("missing stack frames");
+        }
+        if (thrown != 5) {
+            throw new InternalError("not enough throws");
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/7100757/Test7100757.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 7100757
+ * @summary BitSet.nextSetBit() produces incorrect results in the 32-bit VM on SPARC
+ *
+ * @run main/timeout=300 Test7100757
+ */
+
+import java.util.*;
+
+public class Test7100757 {
+
+  public static final int NBITS = 256;
+
+  public static void main(String[] args) {
+
+    BitSet bs = new BitSet(NBITS);
+    Random rnd = new Random();
+    long[] ra = new long[(NBITS+63)/64];
+
+    for(int l=0; l < 5000000; l++) {
+
+      for(int r = 0; r < ra.length; r++) {
+        ra[r] = rnd.nextLong();
+      }
+      test(ra, bs);
+    }
+  }
+
+  static void test(long[] ra, BitSet bs) {
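+      // Build the BitSet from the random words while counting the set bits by hand,
+      // then cross-check cardinality(), nextSetBit() and length(), which exercise the
+      // Long.bitCount/numberOfTrailingZeros/numberOfLeadingZeros intrinsics noted below.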
+      bs.clear();
+      int bits_set = 0;
+      for(int i = 0, t = 0, b = 0; i < NBITS; i++) {
+        long bit = 1L << b++;
+        if((ra[t]&bit) != 0) {
+          bs.set(i);
+          bits_set++;
+        }
+        if(b == 64) {
+          t++;
+          b = 0;
+        }
+      }
+      // Test Long.bitCount()
+      int check_bits = bs.cardinality();
+      if (check_bits != bits_set) {
+        String bs_str = bs.toString();
+        System.err.printf("cardinality bits: %d != %d  bs: %s\n", check_bits, bits_set, bs_str);
+        System.exit(97);
+      }
+      // Test Long.numberOfTrailingZeros()
+      check_bits = 0;
+      for (int i = bs.nextSetBit(0); i >= 0; i = bs.nextSetBit(i+1)) {
+        check_bits++;
+      }
+      if (check_bits != bits_set) {
+        String bs_str = bs.toString();
+        System.err.printf("nextSetBit bits: %d != %d  bs: %s\n", check_bits, bits_set, bs_str);
+        System.exit(97);
+      }
+      // Test Long.numberOfLeadingZeros()
+      for(int i = bs.length(); i > 0; i = bs.length()) {
+        bs.clear(i-1);
+      }
+      // Test Long.bitCount()
+      check_bits = bs.cardinality();
+      if (check_bits != 0) {
+        String bs_str = bs.toString();
+        System.err.printf("after clear bits: %d != 0  bs: %s\n", check_bits, bs_str);
+        System.exit(97);
+      }
+  }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/7110586/Test7110586.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 7110586
+ * @summary C2 generates incorrect results
+ *
+ * @run main/othervm -Xbatch Test7110586
+ */
+
+public class Test7110586 {
+  static int test1() {
+    int i = 0;
+    for ( ; i < 11; i+=1) {}
+    return i;
+  }
+  static int test2() {
+    int i = 0;
+    for ( ; i < 11; i+=2) {}
+    return i;
+  }
+  static int test3() {
+    int i = 0;
+    for ( ; i < 11; i+=3) {}
+    return i;
+  }
+  static int test11() {
+    int i = 0;
+    for ( ; i < 11; i+=11) {}
+    return i;
+  }
+
+  static int testm1() {
+    int i = 0;
+    for ( ; i > -11; i-=1) {}
+    return i;
+  }
+  static int testm2() {
+    int i = 0;
+    for ( ; i > -11; i-=2) {}
+    return i;
+  }
+  static int testm3() {
+    int i = 0;
+    for ( ; i > -11; i-=3) {}
+    return i;
+  }
+  static int testm11() {
+    int i = 0;
+    for ( ; i > -11; i-=11) {}
+    return i;
+  }
+
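+  // Each loop should exit with i just past its bound: 11 for increments of 1 and 11,
+  // 12 for increments of 2 and 3, and the negated values for the decrementing loops.
+  // main() checks that compiled code produces these same exit values.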
+  public static void main(String args[]) {
+    int x1  = 0;
+    int x2  = 0;
+    int x3  = 0;
+    int x11 = 0;
+    int m1  = 0;
+    int m2  = 0;
+    int m3  = 0;
+    int m11 = 0;
+    for (int i=0; i<10000; i++) {
+      x1  = test1();
+      x2  = test2();
+      x3  = test3();
+      x11 = test11();
+      m1  = testm1();
+      m2  = testm2();
+      m3  = testm3();
+      m11 = testm11();
+    }
+    boolean failed = false;
+    if (x1 != 11) {
+      System.out.println("ERROR (incr = +1): " + x1 + " != 11");
+      failed = true;
+    }
+    if (x2 != 12) {
+      System.out.println("ERROR (incr = +2): " + x2 + " != 12");
+      failed = true;
+    }
+    if (x3 != 12) {
+      System.out.println("ERROR (incr = +3): " + x3 + " != 12");
+      failed = true;
+    }
+    if (x11 != 11) {
+      System.out.println("ERROR (incr = +11): " + x11 + " != 11");
+      failed = true;
+    }
+    if (m1 != -11) {
+      System.out.println("ERROR (incr = -1): " + m1 + " != -11");
+      failed = true;
+    }
+    if (m2 != -12) {
+      System.out.println("ERROR (incr = -2): " + m2 + " != -12");
+      failed = true;
+    }
+    if (m3 != -12) {
+      System.out.println("ERROR (incr = -3): " + m3 + " != -12");
+      failed = true;
+    }
+    if (m11 != -11) {
+      System.out.println("ERROR (incr = -11): " + m11 + " != -11");
+      failed = true;
+    }
+    if (failed) {
+      System.exit(97);
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/6941923/test6941923.sh	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,179 @@
+##
+## @test @(#)test6941923.sh
+## @bug 6941923
+## @summary test newly added flags for GC log rotation
+## @author yqi
+## @run shell test6941923.sh
+##
+
+## skip on windows
+OS=`uname -s`
+case "$OS" in
+  SunOS | Linux )
+    NULL=/dev/null
+    PS=":"
+    FS="/"
+    ;;
+  Windows_* )
+    echo "Test skipped for Windows"
+    exit 0 
+    ;;
+  * )
+    echo "Unrecognized system!"
+    exit 1;
+    ;;
+esac
+
+if [ "${JAVA_HOME}" = "" ]
+then
+  echo "JAVA_HOME not set"
+  exit 0
+fi
+
+$JAVA_HOME/bin/java -version > $NULL 2>&1
+status=$?
+if [ $status != 0 ]; then
+  echo "Wrong JAVA_HOME? JAVA_HOME: $JAVA_HOME"
+  exit $status
+fi
+
+# create a small test case
+testname="Test"
+if [ -e ${testname}.java ]; then
+  rm -rf ${testname}.*
+fi
+
+cat >> ${testname}.java << __EOF__
+import java.util.Vector;
+
+public class Test implements Runnable
+{
+  private volatile boolean _should_stop = false;
+
+  public static void main(String[] args) throws Exception {
+
+    long limit = Long.parseLong(args[0]) * 60L * 1000L;   // minutes
+    Test t = new Test();
+    t.set_stop(false);
+    Thread thr = new Thread(t);
+    thr.start();
+
+    long time1 = System.currentTimeMillis();
+    long time2 = System.currentTimeMillis();
+    while (time2 - time1 < limit) {
+      try {
+        Thread.sleep(2000); // 2 seconds
+      }
+      catch(Exception e) {}
+      time2 = System.currentTimeMillis();
+      System.out.print("\r... " + (time2 - time1)/1000 + " seconds");
+    }
+    System.out.println();
+    t.set_stop(true);
+  }
+  public void set_stop(boolean value) { _should_stop = value; }
+  public void run() {
+    int cap = 20000;
+    int fix_size = 2048;
+    int loop = 0;
+    Vector< byte[] > v = new Vector< byte[] >(cap);
+    while(!_should_stop) {
+      byte[] g = new byte[fix_size];
+      v.add(g);
+      loop++;
+      if (loop > cap) {
+         v = null;
+         cap *= 2;
+         if (cap > 80000) cap = 80000;
+         v = new Vector< byte[] >(cap);
+      }
+    }
+  }
+}
+__EOF__
+
+msgsuccess="succeeded"
+msgfail="failed"
+gclogsize="16K"
+filesize=$((16*1024))
+$JAVA_HOME/bin/javac ${testname}.java > $NULL 2>&1
+
+if [ $? != 0 ]; then
+  echo "$JAVA_HOME/bin/javac ${testname}.java $fail"
+  exit -1
+fi
+
+# run for 2 minutes; this is enough to complete a full cycle of gc log rotation
+tts=2
+logfile="test.log"
+hotspotlog="hotspot.log"
+
+if [ -e $logfile  ]; then
+  rm -rf $logfile
+fi
+
+# also delete $hotspotlog if it exists
+if [ -f $hotspotlog ]; then 
+  rm -rf $hotspotlog
+fi
+
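+# first pass: rotate within a single file (NumberOfGCLogFiles=1) capped at $gclogsize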
+options="-Xloggc:$logfile -XX:+UseConcMarkSweepGC -XX:+PrintGC -XX:+PrintGCDetails -XX:+UseGCLogFileRotation  -XX:NumberOfGCLogFiles=1 -XX:GCLogFileSize=$gclogsize"
+echo "Test gc log rotation in same file, wait for $tts minutes ...."
+$JAVA_HOME/bin/java $options $testname $tts
+if [ $? != 0 ]; then
+  echo "$msgfail"
+  exit -1
+fi
+
+# the rotated file will be $logfile.0
+if [ -f $logfile.0 ]; then
+  outfilesize=`ls -l $logfile.0 | awk '{print $5 }'`
+  if [ $((outfilesize)) -ge $((filesize)) ]; then
+    echo $msgsuccess
+  else
+    echo $msgfail
+  fi
+else 
+  echo $msgfail
+  exit -1
+fi
+
+# delete log file 
+rm -rf $logfile.0
+if [ -f $hotspotlog ]; then
+  rm -rf $hotspotlog
+fi
+
+# second pass: rotate across multiple log files
+numoffiles=3
+options="-Xloggc:$logfile -XX:+UseConcMarkSweepGC -XX:+PrintGC -XX:+PrintGCDetails -XX:+UseGCLogFileRotation  -XX:NumberOfGCLogFiles=$numoffiles -XX:GCLogFileSize=$gclogsize"
+echo "Test gc log rotation in $numoffiles files, wait for $tts minutes ...."
+$JAVA_HOME/bin/java $options $testname $tts
+if [ $? != 0 ]; then
+  echo "$msgfail"
+  exit -1
+fi
+
+atleast=0    # number of files whose size reaches $gclogsize; expect at least numoffiles-1
+tk=0
+while [ $(($tk)) -lt $(($numoffiles)) ]
+do
+  if [ -f $logfile.$tk ]; then
+    outfilesize=`ls -l $logfile.$tk | awk '{ print $5 }'`
+    if [ $(($outfilesize)) -ge $(($filesize)) ]; then
+      atleast=$((atleast+1))
+    fi
+  fi
+  tk=$((tk+1))
+done
+
+rm -rf $logfile.*
+rm -rf $testname.*
+rm -rf $hotspotlog
+
+if [ $(($atleast)) -ge $(($numoffiles-1)) ]; then
+  echo $msgsuccess
+else
+  echo $msgfail
+  exit -1
+fi
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/7072527/TestFullGCCount.java	Thu Dec 22 15:46:11 2011 +0000
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestFullGCCount.java
+ * @bug 7072527
+ * @summary CMS: JMM GC counters overcount in some cases
+ * @run main/othervm -XX:+UseConcMarkSweepGC TestFullGCCount
+ *
+ */
+import java.util.*;
+import java.lang.management.*;
+
+public class TestFullGCCount {
+
+    public String collectorName = "ConcurrentMarkSweep";
+
+    public static void main(String [] args) {
+
+        TestFullGCCount t = null;
+        if (args.length==2) {
+            t = new TestFullGCCount(args[0], args[1]);
+        } else {
+            t = new TestFullGCCount();
+        }
+        System.out.println("Monitoring collector: " + t.collectorName);
+        t.run();
+    }
+
+    public TestFullGCCount(String pool, String collector) {
+        collectorName = collector;
+    }
+
+    public TestFullGCCount() {
+    }
+
+    public void run() {
+        int count = 0;
+        int iterations = 20;
+        long counts[] = new long[iterations];
+        boolean diffAlways2 = true; // assume we will fail
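+        // Bug 7072527: CMS reported two collections per System.gc(); a constant
+        // delta of 2 between successive counts reproduces the overcount.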
+
+        for (int i=0; i<iterations; i++) {
+            System.gc();
+            counts[i] = getCollectionCount();
+            if (i>0) {
+                if (counts[i] - counts[i-1] != 2) {
+                    diffAlways2 = false;
+                }
+            }
+        }
+        if (diffAlways2) {
+            throw new RuntimeException("FAILED: System.gc() must be incrementing the count twice.");
+        }
+        System.out.println("Passed.");
+    }
+
+    private long getCollectionCount() {
+        long count = 0;
+        List<MemoryPoolMXBean> pools = ManagementFactory.getMemoryPoolMXBeans();
+        List<GarbageCollectorMXBean> collectors = ManagementFactory.getGarbageCollectorMXBeans();
+        for (int i=0; i<collectors.size(); i++) {
+            GarbageCollectorMXBean collector = collectors.get(i);
+            String name = collector.getName();
+            if (name.contains(collectorName)) {
+                System.out.println(name + ": collection count = "
+                                   + collector.getCollectionCount());
+                count = collector.getCollectionCount();
+            }
+        }
+        return count;
+    }
+
+}
+